From 1be170b01addf84534b51d68e2d5ac76a1a42ac6 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 31 Dec 2016 16:00:24 -0700 Subject: [PATCH 01/18] Replace BlockAndBuilder with Builder. --- src/librustc_trans/abi.rs | 13 ++-- src/librustc_trans/adt.rs | 21 +++--- src/librustc_trans/asm.rs | 5 +- src/librustc_trans/base.rs | 50 ++++++-------- src/librustc_trans/builder.rs | 57 ++++++++++++++++ src/librustc_trans/callee.rs | 7 +- src/librustc_trans/cleanup.rs | 38 ++++++----- src/librustc_trans/common.rs | 101 ++++------------------------ src/librustc_trans/debuginfo/mod.rs | 5 +- src/librustc_trans/glue.rs | 50 +++++++------- src/librustc_trans/intrinsic.rs | 71 ++++++++++--------- src/librustc_trans/meth.rs | 3 +- src/librustc_trans/mir/block.rs | 101 ++++++++++++++-------------- src/librustc_trans/mir/constant.rs | 7 +- src/librustc_trans/mir/lvalue.rs | 41 ++++------- src/librustc_trans/mir/mod.rs | 29 ++++---- src/librustc_trans/mir/operand.rs | 19 +++--- src/librustc_trans/mir/rvalue.rs | 55 +++++++-------- src/librustc_trans/mir/statement.rs | 11 +-- src/librustc_trans/tvec.rs | 13 ++-- src/test/codegen/stores.rs | 4 +- 21 files changed, 344 insertions(+), 357 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 07b7adc63bffa..18f433eb16249 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -10,7 +10,8 @@ use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace}; use base; -use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; +use builder::Builder; +use common::{type_is_fat_ptr, C_uint}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -236,7 +237,7 @@ impl ArgType { /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. 
- pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { + pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) { if self.is_ignore() { return; } @@ -269,7 +270,7 @@ impl ArgType { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = bcx.fcx().alloca(ty, "abi_cast"); + let llscratch = bcx.alloca(ty, "abi_cast"); base::Lifetime::Start.call(bcx, llscratch); // ...where we first store the value... @@ -293,14 +294,16 @@ impl ArgType { } } - pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) { + pub fn store_fn_arg( + &self, bcx: &Builder, idx: &mut usize, dst: ValueRef + ) { if self.pad.is_some() { *idx += 1; } if self.is_ignore() { return; } - let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint); + let val = llvm::get_param(bcx.llfn(), *idx as c_uint); *idx += 1; self.store(bcx, val, dst); } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index e08f29d24729c..29a41acd0e557 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -49,6 +49,7 @@ use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; use common::*; +use builder::Builder; use glue; use base; use machine; @@ -303,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. pub fn trans_switch<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, range_assert: bool @@ -331,7 +332,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { /// Obtain the actual discriminant of a value. 
pub fn trans_get_discr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, cast_to: Option, @@ -374,7 +375,7 @@ pub fn trans_get_discr<'a, 'tcx>( } fn struct_wrapped_nullable_bitdiscr( - bcx: &BlockAndBuilder, + bcx: &Builder, nndiscr: u64, discrfield: &layout::FieldPath, scrutinee: ValueRef @@ -387,7 +388,7 @@ fn struct_wrapped_nullable_bitdiscr( } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, +fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { let llty = Type::from_integer(bcx.ccx, ity); @@ -415,7 +416,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. -pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { +pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { discr, .. } @@ -436,7 +437,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: /// Set the discriminant for a new value of the given case of the given /// representation. 
pub fn trans_set_discr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr + bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr ) { let l = bcx.ccx.layout_of(t); match *l { @@ -484,8 +485,8 @@ pub fn trans_set_discr<'a, 'tcx>( } } -fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool { - bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" +fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { + bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64" } fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { @@ -498,7 +499,7 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, @@ -560,7 +561,7 @@ pub fn trans_field_ptr<'a, 'tcx>( } fn struct_field_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, st: &layout::Struct, fields: &Vec>, val: MaybeSizedValue, diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 05699fb9de9a5..0912e54bf537e 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -15,6 +15,7 @@ use base; use common::*; use type_of; use type_::Type; +use builder::Builder; use rustc::hir; use rustc::ty::Ty; @@ -25,7 +26,7 @@ use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM pub fn trans_inline_asm<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, ia: &hir::InlineAsm, outputs: Vec<(ValueRef, Ty<'tcx>)>, mut inputs: Vec @@ -61,7 +62,7 @@ pub fn trans_inline_asm<'a, 'tcx>( // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match &bcx.sess().target.target.arch[..] 
{ + let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] { "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], _ => Vec::new() }; diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 2806be123a936..cb45f9c051264 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -38,7 +38,7 @@ use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use middle::lang_items::StartFnLangItem; use rustc::ty::subst::Substs; use rustc::traits; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::adjustment::CustomCoerceUnsized; use rustc::dep_graph::{DepNode, WorkProduct}; use rustc::hir::map as hir_map; @@ -51,7 +51,7 @@ use adt; use attributes; use builder::Builder; use callee::{Callee}; -use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; +use common::{C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_struct_in_context, C_u64, C_undef}; use common::{CrateContext, FunctionContext}; @@ -161,7 +161,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate { } pub fn compare_simd_types<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lhs: ValueRef, rhs: ValueRef, t: Ty<'tcx>, @@ -218,7 +218,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
pub fn unsize_thin_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> @@ -242,7 +242,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>( /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst: ValueRef, @@ -272,10 +272,10 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, assert_eq!(def_a, def_b); let src_fields = def_a.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_a, f) + monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f) }); let dst_fields = def_b.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_b, f) + monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f) }); let src = adt::MaybeSizedValue::sized(src); @@ -322,7 +322,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx } pub fn cast_shift_expr_rhs( - cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef + cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) } @@ -421,7 +421,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. 
-pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { +pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.ccx, t) { @@ -433,7 +433,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu } } -pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, +pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>, data: ValueRef, extra: ValueRef, dst: ValueRef, @@ -459,7 +459,7 @@ pub fn load_fat_ptr<'a, 'tcx>( (ptr, meta) } -pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { +pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx) { bcx.zext(val, Type::i8(bcx.ccx)) } else { @@ -467,7 +467,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { bcx.trunc(val, Type::i1(bcx.ccx)) } else { @@ -523,11 +523,13 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - dst: ValueRef, - src: ValueRef, - t: Ty<'tcx>, - align: Option) { +pub fn memcpy_ty<'a, 'tcx>( + bcx: &Builder<'a, 'tcx>, + dst: ValueRef, + src: ValueRef, + t: Ty<'tcx>, + align: Option, +) { let ccx = bcx.ccx; if type_is_zero_size(ccx, t) { @@ -553,11 +555,6 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } -pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { - assert!(!ty.has_param_types()); - bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name) -} - pub fn trans_instance<'a, 'tcx>(ccx: 
&CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { let _s = if ccx.sess().trans_stats() { let mut instance_name = String::new(); @@ -623,7 +620,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // We create an alloca to hold a pointer of type `ret.original_ty` // which will hold the pointer to the right alloca which has the // final ret value - fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") + bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") }; let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value let mut llarg_idx = fn_ty.ret.is_indirect() as usize; @@ -756,12 +753,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { // `main` should respect same config for frame pointer elimination as rest of code attributes::set_frame_pointer_elimination(ccx, llfn); - let llbb = unsafe { - let name = CString::new("top").unwrap(); - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr()) - }; - let bld = Builder::with_ccx(ccx); - bld.position_at_end(llbb); + let bld = Builder::new_block(ccx, llfn, "top"); debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld); diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 865787f48fc52..6ea048d7ed3bf 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -19,12 +19,16 @@ use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; +use rustc::ty::{Ty, TypeFoldable}; +use type_of; use std::borrow::Cow; use std::ffi::CString; use std::ptr; use syntax_pos::Span; +// All Builders must have an llfn associated with them +#[must_use] pub struct Builder<'a, 'tcx: 'a> { pub llbuilder: BuilderRef, pub ccx: &'a CrateContext<'a, 'tcx>, @@ -46,6 +50,20 @@ fn noname() -> *const c_char { } impl<'a, 'tcx> Builder<'a, 'tcx> { + pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self { + let builder = Builder::with_ccx(ccx); + let llbb = unsafe 
{ + let name = CString::new(name).unwrap(); + llvm::LLVMAppendBasicBlockInContext( + ccx.llcx(), + llfn, + name.as_ptr() + ) + }; + builder.position_at_end(llbb); + builder + } + pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self { // Create a fresh builder from the crate context. let llbuilder = unsafe { @@ -57,6 +75,32 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn build_new_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> { + let builder = Builder::with_ccx(self.ccx); + let llbb = unsafe { + let name = CString::new(name).unwrap(); + llvm::LLVMAppendBasicBlockInContext( + self.ccx.llcx(), + self.llfn(), + name.as_ptr() + ) + }; + builder.position_at_end(llbb); + builder + } + + pub fn llfn(&self) -> ValueRef { + unsafe { + llvm::LLVMGetBasicBlockParent(self.llbb()) + } + } + + pub fn llbb(&self) -> BasicBlockRef { + unsafe { + llvm::LLVMGetInsertBlock(self.llbuilder) + } + } + fn count_insn(&self, category: &str) { if self.ccx.sess().trans_stats() { self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1); @@ -435,6 +479,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + let builder = Builder::with_ccx(self.ccx); + builder.position_at_start(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn()) + }); + builder.dynamic_alloca(ty, name) + } + + pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef { + assert!(!ty.has_param_types()); + self.alloca(type_of::type_of(self.ccx, ty), name) + } + pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef { self.count_insn("alloca"); unsafe { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 1abe25ea6073e..aabd3083ff53c 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -23,7 +23,6 @@ use rustc::traits; use abi::{Abi, FnType}; use attributes; use base; -use base::*; use common::{ self, CrateContext, FunctionContext, SharedCrateContext }; @@ -348,7 +347,7 @@ fn 
trans_fn_once_adapter_shim<'a, 'tcx>( let llenv = if env_arg.is_indirect() { llargs[self_idx] } else { - let scratch = alloc_ty(&bcx, closure_ty, "self"); + let scratch = bcx.alloca_ty(closure_ty, "self"); let mut llarg_idx = self_idx; env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch @@ -365,12 +364,12 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. - let self_scope = fcx.schedule_drop_mem(MaybeSizedValue::sized(llenv), closure_ty); + let self_scope = fcx.schedule_drop_mem(&bcx, MaybeSizedValue::sized(llenv), closure_ty); let llfn = callee.reify(bcx.ccx); let llret; if let Some(landing_pad) = self_scope.landing_pad { - let normal_bcx = bcx.fcx().build_new_block("normal-return"); + let normal_bcx = bcx.build_new_block("normal-return"); llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 4e59ea3f6c5ed..bb8faefffecd4 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -21,7 +21,8 @@ use llvm::BasicBlockRef; use base; use adt::MaybeSizedValue; -use common::{BlockAndBuilder, FunctionContext, Funclet}; +use builder::Builder; +use common::{FunctionContext, Funclet}; use glue; use type_::Type; use rustc::ty::Ty; @@ -42,7 +43,7 @@ pub struct DropValue<'tcx> { } impl<'tcx> DropValue<'tcx> { - fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) { + fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) { glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) } @@ -52,13 +53,13 @@ impl<'tcx> DropValue<'tcx> { /// landing_pad -> ... cleanups ... -> [resume] /// /// This should only be called once per function, as it creates an alloca for the landingpad. 
- fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef { + fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef { debug!("get_landing_pad"); - let bcx = fcx.build_new_block("cleanup_unwind"); + let bcx = bcx.build_new_block("cleanup_unwind"); let llpersonality = bcx.ccx.eh_personality(); bcx.set_personality_fn(llpersonality); - if base::wants_msvc_seh(fcx.ccx.sess()) { + if base::wants_msvc_seh(bcx.ccx.sess()) { let pad = bcx.cleanup_pad(None, &[]); let funclet = Some(Funclet::new(pad)); self.trans(funclet.as_ref(), &bcx); @@ -68,10 +69,10 @@ impl<'tcx> DropValue<'tcx> { // The landing pad return type (the type being propagated). Not sure // what this represents but it's determined by the personality // function and this is what the EH proposal example uses. - let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); + let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false); // The only landing pad clause will be 'cleanup' - let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn()); // The landing pad block is a cleanup bcx.set_cleanup(llretval); @@ -79,7 +80,7 @@ impl<'tcx> DropValue<'tcx> { // Insert cleanup instructions into the cleanup block self.trans(None, &bcx); - if !bcx.sess().target.target.options.custom_unwind_resume { + if !bcx.ccx.sess().target.target.options.custom_unwind_resume { bcx.resume(llretval); } else { let exc_ptr = bcx.extract_value(llretval, 0); @@ -94,7 +95,9 @@ impl<'tcx> DropValue<'tcx> { impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` - pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupScope<'tcx> { + pub fn schedule_drop_mem( + &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + ) -> CleanupScope<'tcx> { if 
!self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { val: val, @@ -102,7 +105,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { skip_dtor: false, }; - CleanupScope::new(self, drop) + CleanupScope::new(bcx, drop) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -110,8 +113,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>) - -> CleanupScope<'tcx> { + pub fn schedule_drop_adt_contents( + &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + ) -> CleanupScope<'tcx> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } @@ -122,16 +126,16 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { skip_dtor: true, }; - CleanupScope::new(self, drop) + CleanupScope::new(bcx, drop) } } impl<'tcx> CleanupScope<'tcx> { - fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { + fn new<'a>(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { cleanup: Some(drop_val), - landing_pad: if !fcx.ccx.sess().no_landing_pads() { - Some(drop_val.get_landing_pad(fcx)) + landing_pad: if !bcx.ccx.sess().no_landing_pads() { + Some(drop_val.get_landing_pad(bcx)) } else { None }, @@ -145,7 +149,7 @@ impl<'tcx> CleanupScope<'tcx> { } } - pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) { + pub fn trans<'a>(self, bcx: &'a Builder<'a, 'tcx>) { if let Some(cleanup) = self.cleanup { cleanup.trans(None, &bcx); } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 7e7bd15dc6e5a..09be09a2b0aec 100644 --- a/src/librustc_trans/common.rs +++ 
b/src/librustc_trans/common.rs @@ -12,7 +12,6 @@ //! Code that is useful in various trans modules. -use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; @@ -37,7 +36,6 @@ use rustc::hir; use libc::{c_uint, c_char}; use std::borrow::Cow; use std::iter; -use std::ops::Deref; use std::ffi::CString; use syntax::ast; @@ -235,8 +233,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, - - alloca_builder: Builder<'a, 'tcx>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { @@ -247,30 +243,18 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { llfn: llfndecl, alloca_insert_pt: None, ccx: ccx, - alloca_builder: Builder::with_ccx(ccx), - }; - - let val = { - let entry_bcx = fcx.build_new_block("entry-block"); - let val = entry_bcx.load(C_null(Type::i8p(ccx))); - fcx.alloca_builder.position_at_start(entry_bcx.llbb()); - val }; + let entry_bcx = Builder::new_block(fcx.ccx, fcx.llfn, "entry-block"); + entry_bcx.position_at_start(entry_bcx.llbb()); // Use a dummy instruction as the insertion point for all allocas. // This is later removed in the drop of FunctionContext. 
- fcx.alloca_insert_pt = Some(val); + fcx.alloca_insert_pt = Some(entry_bcx.load(C_null(Type::i8p(ccx)))); fcx } - pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(unsafe { - llvm::LLVMGetFirstBasicBlock(self.llfn) - }, self) - } - - pub fn new_block(&'a self, name: &str) -> BasicBlockRef { + pub fn new_block(&self, name: &str) -> BasicBlockRef { unsafe { let name = CString::new(name).unwrap(); llvm::LLVMAppendBasicBlockInContext( @@ -281,12 +265,14 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } } - pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(self.new_block(name), self) + pub fn build_new_block(&self, name: &str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn, name) } - pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { - self.alloca_builder.dynamic_alloca(ty, name) + pub fn get_entry_block(&'a self) -> Builder<'a, 'tcx> { + let builder = Builder::with_ccx(self.ccx); + builder.position_at_end(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) }); + builder } } @@ -298,65 +284,6 @@ impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { } } -#[must_use] -pub struct BlockAndBuilder<'a, 'tcx: 'a> { - // The BasicBlockRef returned from a call to - // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic - // block to the function pointed to by llfn. We insert - // instructions into that block by way of this block context. - // The block pointing to this one in the function's digraph. - llbb: BasicBlockRef, - - // The function context for the function to which this block is - // attached. - fcx: &'a FunctionContext<'a, 'tcx>, - - builder: Builder<'a, 'tcx>, -} - -impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> { - pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self { - let builder = Builder::with_ccx(fcx.ccx); - // Set the builder's position to this block's end. 
- builder.position_at_end(llbb); - BlockAndBuilder { - llbb: llbb, - fcx: fcx, - builder: builder, - } - } - - pub fn at_start(&self, f: F) -> R - where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R - { - self.position_at_start(self.llbb); - let r = f(self); - self.position_at_end(self.llbb); - r - } - - pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> { - self.fcx - } - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { - self.ccx.tcx() - } - pub fn sess(&self) -> &'a Session { - self.ccx.sess() - } - - pub fn llbb(&self) -> BasicBlockRef { - self.llbb - } -} - -impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> { - type Target = Builder<'a, 'tcx>; - fn deref(&self) -> &Self::Target { - &self.builder - } -} - /// A structure representing an active landing pad for the duration of a basic /// block. /// @@ -725,7 +652,7 @@ pub fn langcall(tcx: TyCtxt, // of Java. (See related discussion on #1877 and #10183.) pub fn build_unchecked_lshift<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { @@ -736,7 +663,7 @@ pub fn build_unchecked_lshift<'a, 'tcx>( } pub fn build_unchecked_rshift<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef + bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); // #1877, #10183: Ensure that input is always valid @@ -749,13 +676,13 @@ pub fn build_unchecked_rshift<'a, 'tcx>( } } -fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { +fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { let rhs_llty = val_ty(rhs); bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false)) } pub fn shift_mask_val<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, llty: Type, mask_llty: Type, invert: bool diff --git a/src/librustc_trans/debuginfo/mod.rs 
b/src/librustc_trans/debuginfo/mod.rs index 86099d241df68..9117f49cf3ea5 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,7 +27,8 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use abi::Abi; -use common::{CrateContext, BlockAndBuilder}; +use common::CrateContext; +use builder::Builder; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -423,7 +424,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, dbg_context: &FunctionDebugContext, variable_name: ast::Name, variable_type: Ty<'tcx>, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 62141369caec1..c27576f9016ae 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -35,16 +35,17 @@ use type_::Type; use value::Value; use Disr; use cleanup::CleanupScope; +use builder::Builder; use syntax_pos::DUMMY_SP; pub fn trans_exchange_free_ty<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, ptr: MaybeSizedValue, content_ty: Ty<'tcx> ) { - let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem); - let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty))); + let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem); + let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty))); let callee = Callee::def(bcx.ccx, def_id, substs); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); @@ -93,12 +94,12 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t } } -fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) { +fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) { call_drop_glue(bcx, args, t, false, None) } pub fn call_drop_glue<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: 
&Builder<'a, 'tcx>, mut args: MaybeSizedValue, t: Ty<'tcx>, skip_dtor: bool, @@ -232,7 +233,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi } ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { let shallow_drop = def.is_union(); - let tcx = bcx.tcx(); + let tcx = bcx.ccx.tcx(); let def = t.ty_adt_def().unwrap(); @@ -245,7 +246,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. let contents_scope = if !shallow_drop { - bcx.fcx().schedule_drop_adt_contents(ptr, t) + fcx.schedule_drop_adt_contents(&bcx, ptr, t) } else { CleanupScope::noop() }; @@ -264,7 +265,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let llret; let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; if let Some(landing_pad) = contents_scope.landing_pad { - let normal_bcx = bcx.fcx().build_new_block("normal-return"); + let normal_bcx = bcx.build_new_block("normal-return"); llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { @@ -288,8 +289,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi bcx.ret_void(); } -pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - t: Ty<'tcx>, info: ValueRef) +pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef) -> (ValueRef, ValueRef) { debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); @@ -331,7 +331,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // Recurse to get the size of the dynamically sized field (must be // the last field). 
let last_field = def.struct_variant().fields.last().unwrap(); - let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); + let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field); let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -383,7 +383,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, (bcx.load(size_ptr), bcx.load(align_ptr)) } ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(bcx.tcx()); + let unit_ty = t.sequence_element_type(bcx.ccx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. let llunit_ty = sizing_type_of(bcx.ccx, unit_ty); @@ -397,16 +397,16 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } // Iterates through the elements of a structural type, dropping them. -fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, +fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: MaybeSizedValue, t: Ty<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { - fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, + -> Builder<'a, 'tcx> { + fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>, t: Ty<'tcx>, av: adt::MaybeSizedValue, variant: &'tcx ty::VariantDef, substs: &Substs<'tcx>) { - let tcx = cx.tcx(); + let tcx = cx.ccx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); @@ -417,7 +417,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, let mut cx = cx; match t.sty { ty::TyClosure(def_id, substs) => { - for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { + for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() { let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); drop_ty(&cx, MaybeSizedValue::sized(llupvar), 
upvar_ty); } @@ -425,12 +425,12 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, ty::TyArray(_, n) => { let base = get_dataptr(&cx, ptr.value); let len = C_uint(cx.ccx, n); - let unit_ty = t.sequence_element_type(cx.tcx()); + let unit_ty = t.sequence_element_type(cx.ccx.tcx()); cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); } ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(cx.tcx()); + let unit_ty = t.sequence_element_type(cx.ccx.tcx()); cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta, |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); } @@ -442,7 +442,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); + let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i); let ptr = if cx.ccx.shared().type_is_sized(field_ty) { @@ -470,7 +470,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { - let tcx = cx.tcx(); + let tcx = cx.ccx.tcx(); drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize); // Create a fall-through basic block for the "else" case of @@ -486,15 +486,15 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. 
- let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void"); + let ret_void_cx = cx.build_new_block("enum-iter-ret-void"); ret_void_cx.ret_void(); let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); - let next_cx = cx.fcx().build_new_block("enum-iter-next"); + let next_cx = cx.build_new_block("enum-iter-next"); for variant in &adt.variants { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); - let variant_cx = cx.fcx().build_new_block(&variant_cx_name); + let variant_cx = cx.build_new_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); iter_variant(&variant_cx, t, ptr, variant, substs); @@ -508,7 +508,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, }, _ => { - cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) + cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) } } return cx; diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7c026cb153037..af3050fef0d68 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -28,6 +28,7 @@ use Disr; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; +use builder::Builder; use rustc::session::Session; use syntax_pos::Span; @@ -87,14 +88,15 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + fcx: &FunctionContext, callee_ty: Ty<'tcx>, fn_ty: &FnType, llargs: &[ValueRef], llresult: ValueRef, span: Span) { let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); let (def_id, substs, fty) = 
match callee_ty.sty { ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), @@ -125,7 +127,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); + try_intrinsic(bcx, fcx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } "breakpoint" => { @@ -533,7 +535,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // qux` to be converted into `foo, bar, baz, qux`, integer // arguments to be truncated as needed and pointers to be // cast. - fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: &intrinsics::Type, arg_type: Ty<'tcx>, llarg: ValueRef) @@ -634,7 +636,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } } -fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>, @@ -670,7 +672,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } fn memset_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, volatile: bool, ty: Ty<'tcx>, dst: ValueRef, @@ -686,19 +688,20 @@ fn memset_intrinsic<'a, 'tcx>( } fn try_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, + fcx: &FunctionContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef, ) { - if bcx.sess().no_landing_pads() { + if bcx.ccx.sess().no_landing_pads() { bcx.call(func, &[data], None); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, local_ptr, dest); + trans_msvc_try(bcx, fcx, func, data, local_ptr, dest); } else { - trans_gnu_try(bcx, func, data, local_ptr, dest); + trans_gnu_try(bcx, fcx, func, data, local_ptr, dest); } } @@ -709,24 +712,25 @@ fn 
try_intrinsic<'a, 'tcx>( // instructions are meant to work for all targets, as of the time of this // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. -fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + fcx: &FunctionContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(fcx, &mut |bcx| { let ccx = bcx.ccx; bcx.set_personality_fn(bcx.ccx.eh_personality()); - let normal = bcx.fcx().build_new_block("normal"); - let catchswitch = bcx.fcx().build_new_block("catchswitch"); - let catchpad = bcx.fcx().build_new_block("catchpad"); - let caught = bcx.fcx().build_new_block("caught"); + let normal = bcx.build_new_block("normal"); + let catchswitch = bcx.build_new_block("catchswitch"); + let catchpad = bcx.build_new_block("catchpad"); + let caught = bcx.build_new_block("caught"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); // We're generating an IR snippet that looks like: // @@ -768,7 +772,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = bcx.fcx().alloca(i64p, "slot"); + let slot = bcx.alloca(i64p, "slot"); bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -812,12 +816,13 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // function calling it, and that function may already have other personality // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. 
-fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + fcx: &FunctionContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(fcx, &mut |bcx| { let ccx = bcx.ccx; // Translates the shims described above: @@ -837,12 +842,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx().build_new_block("then"); - let catch = bcx.fcx().build_new_block("catch"); + let then = bcx.build_new_block("then"); + let catch = bcx.build_new_block("catch"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None); then.ret(C_i32(ccx, 0)); @@ -854,7 +859,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // rust_try ignores the selector. 
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); + let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn()); catch.add_clause(vals, C_null(Type::i8p(ccx))); let ptr = catch.extract_value(vals, 0); catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None); @@ -873,7 +878,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, name: &str, inputs: Vec>, output: Ty<'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); @@ -894,7 +899,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // // This function is only generated once and is then cached. fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { @@ -920,7 +925,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, name: &str, callee_ty: Ty<'tcx>, llargs: &[ValueRef], @@ -935,7 +940,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bcx.sess(), span, + bcx.ccx.sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); @@ -957,7 +962,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( - let tcx = bcx.tcx(); + let tcx = bcx.ccx.tcx(); let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig()); let arg_tys = sig.inputs(); diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index cf50e7be2afb5..1765d6e73b32c 100644 --- a/src/librustc_trans/meth.rs +++ 
b/src/librustc_trans/meth.rs @@ -13,6 +13,7 @@ use llvm::{ValueRef, get_params}; use rustc::traits; use callee::{Callee, CalleeData}; use common::*; +use builder::Builder; use consts; use declare; use glue; @@ -27,7 +28,7 @@ use rustc::ty; const VTABLE_OFFSET: usize = 3; /// Extracts a method from a trait object's vtable, at the specified index. -pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llvtable: ValueRef, vtable_index: usize) -> ValueRef { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 0321417b153aa..a818694267b90 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -17,7 +17,8 @@ use abi::{Abi, FnType, ArgType}; use adt::{self, MaybeSizedValue}; use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, BlockAndBuilder, Funclet}; +use builder::Builder; +use common::{self, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use Disr; @@ -57,7 +58,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); - let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| { let lltarget = this.blocks[bb]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[bb] { @@ -74,7 +75,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; - let llblock = |this: &mut Self, target: mir::BasicBlock| { + let llblock = |this: &mut Self, bcx: &Builder, target: mir::BasicBlock| { let lltarget = this.blocks[target]; if let Some(cp) = cleanup_pad { @@ -84,7 +85,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = 
this.fcx.build_new_block(name); + let trampoline = bcx.build_new_block(name); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -121,7 +122,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ps = self.get_personality_slot(&bcx); let lp = bcx.load(ps); Lifetime::End.call(&bcx, ps); - if !bcx.sess().target.target.options.custom_unwind_resume { + if !bcx.ccx.sess().target.target.options.custom_unwind_resume { bcx.resume(lp); } else { let exc_ptr = bcx.extract_value(lp, 0); @@ -138,14 +139,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => { let cond = self.trans_operand(&bcx, cond); - let lltrue = llblock(self, true_bb); - let llfalse = llblock(self, false_bb); + let lltrue = llblock(self, &bcx, true_bb); + let llfalse = llblock(self, &bcx, false_bb); bcx.cond_br(cond.immediate(), lltrue, llfalse); } mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); - let ty = discr_lvalue.ty.to_ty(bcx.tcx()); + let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx()); let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true); let mut bb_hist = FxHashMap(); @@ -158,7 +159,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // code. This is especially helpful in cases like an if-let on a huge enum. // Note: This optimization is only valid for exhaustive matches. Some((&&bb, &c)) if c > targets.len() / 2 => { - (Some(bb), llblock(self, bb)) + (Some(bb), llblock(self, &bcx, bb)) } // We're generating an exhaustive switch, so the else branch // can't be hit. 
Branching to an unreachable instruction @@ -169,7 +170,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { assert_eq!(adt_def.variants.len(), targets.len()); for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { if default_bb != Some(target) { - let llbb = llblock(self, target); + let llbb = llblock(self, &bcx, target); let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); bcx.add_case(switch, llval, llbb) } @@ -180,10 +181,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = base::to_immediate(&bcx, discr, switch_ty); - let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); + let switch = bcx.switch(discr, llblock(self, &bcx, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty); - let llbb = llblock(self, *target); + let llbb = llblock(self, &bcx, *target); bcx.add_case(switch, val.llval, llbb) } } @@ -202,7 +203,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(tr_lvalue) => { OperandRef { val: Ref(tr_lvalue.llval), - ty: tr_lvalue.ty.to_ty(bcx.tcx()) + ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx()) } } }; @@ -232,7 +233,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Drop { ref location, target, unwind } => { - let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); + let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx()); let ty = self.monomorphize(&ty); // Double check for necessity to drop @@ -260,7 +261,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { drop_fn, args, self.blocks[target], - llblock(self, unwind), + llblock(self, &bcx, unwind), cleanup_bundle ); } else { @@ -300,7 +301,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); // Create the failure block and the conditional branch to it. 
- let lltarget = llblock(self, target); + let lltarget = llblock(self, &bcx, target); let panic_block = self.fcx.build_new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb()); @@ -313,7 +314,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { self.set_debug_loc(&bcx, terminator.source_info); // Get the location information. - let loc = bcx.sess().codemap().lookup_char_pos(span.lo); + let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo); let filename = Symbol::intern(&loc.file.name).as_str(); let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); @@ -363,15 +364,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if const_cond == Some(!expected) { if let Some(err) = const_err { let err = ConstEvalErr{ span: span, kind: err }; - let mut diag = bcx.tcx().sess.struct_span_warn( + let mut diag = bcx.ccx.tcx().sess.struct_span_warn( span, "this expression will panic at run-time"); - note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); + note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag); diag.emit(); } } // Obtain the panic entry point. - let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); + let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item); let callee = Callee::def(bcx.ccx, def_id, bcx.ccx.empty_substs_for_def_id(def_id)); let llfn = callee.reify(bcx.ccx); @@ -381,7 +382,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.invoke(llfn, &args, self.unreachable_block(), - llblock(self, unwind), + llblock(self, &bcx, unwind), cleanup_bundle); } else { bcx.call(llfn, &args, cleanup_bundle); @@ -410,12 +411,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => bug!("{} is not callable", callee.ty) }; - let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig); + let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig); // Handle intrinsics old trans wants Expr's for, ourselves. 
let intrinsic = match (&callee.ty.sty, &callee.data) { (&ty::TyFnDef(def_id, ..), &Intrinsic) => { - Some(bcx.tcx().item_name(def_id).as_str()) + Some(bcx.ccx.tcx().item_name(def_id).as_str()) } _ => None }; @@ -443,7 +444,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let extra_args = &args[sig.inputs().len()..]; let extra_args = extra_args.iter().map(|op_arg| { - let op_ty = op_arg.ty(&self.mir, bcx.tcx()); + let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx()); self.monomorphize(&op_ty) }).collect::>(); let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args); @@ -545,7 +546,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bug!("Cannot use direct operand with an intrinsic call") }; - trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, + trans_intrinsic_call(&bcx, self.fcx, callee.ty, &fn_ty, &llargs, dest, terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -579,20 +580,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let invokeret = bcx.invoke(fn_ptr, &llargs, ret_bcx, - llblock(self, cleanup), + llblock(self, &bcx, cleanup), cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); if let Some((_, target)) = *destination { let ret_bcx = self.build_block(target); - ret_bcx.at_start(|ret_bcx| { - self.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: sig.output(), - }; - self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - }); + ret_bcx.position_at_start(ret_bcx.llbb()); + self.set_debug_loc(&ret_bcx, terminator.source_info); + let op = OperandRef { + val: Immediate(invokeret), + ty: sig.output(), + }; + self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); + ret_bcx.position_at_end(ret_bcx.llbb()); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -613,7 +614,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_argument(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -634,7 
+635,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let imm_op = |x| OperandRef { val: Immediate(x), // We won't be checking the type again. - ty: bcx.tcx().types.err + ty: bcx.ccx.tcx().types.err }; self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee); self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee); @@ -689,7 +690,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_arguments_untupled(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -765,13 +766,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } - fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { let ccx = bcx.ccx; if let Some(slot) = self.llpersonalityslot { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.fcx().alloca(llretty, "personalityslot"); + let slot = bcx.alloca(llretty, "personalityslot"); self.llpersonalityslot = Some(slot); Lifetime::Start.call(bcx, slot); slot @@ -815,11 +816,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }) } - pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(self.blocks[bb], self.fcx) + pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { + let builder = Builder::with_ccx(self.fcx.ccx); + builder.position_at_end(self.blocks[bb]); + builder } - fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { // If the return is ignored, we can just return a do-nothing ReturnDest @@ -836,14 +839,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return if fn_ret_ty.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect 
return. - let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); llargs.push(tmp); ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result - let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) @@ -864,17 +867,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } - fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { - let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx())); + let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx())); let out_type_size = llbitsize_of_real(bcx.ccx, llouttype); if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. let f = Callee::def(bcx.ccx, def_id, substs); let ty = match f.ty.sty { - ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), + ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f), _ => f.ty }; val = OperandRef { @@ -895,7 +898,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Stores the return value of a function call into it's final location. fn store_return(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, dest: ReturnDest, ret_ty: ArgType, op: OperandRef<'tcx>) { @@ -911,7 +914,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. 
let op = if ret_ty.cast.is_some() { - let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); + let tmp = bcx.alloca_ty(op.ty, "tmp_ret"); ret_ty.store(bcx, op.immediate(), tmp); self.trans_load(bcx, tmp, op.ty) } else { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 700894c255da6..e15c25df91139 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -24,10 +24,11 @@ use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; +use builder::Builder; +use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; -use common::{const_to_opt_u128}; +use common::const_to_opt_u128; use consts; use monomorphize::{self, Instance}; use type_of; @@ -900,7 +901,7 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_constant(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, constant: &mir::Constant<'tcx>) -> Const<'tcx> { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 0cd7f007c5df9..b09ce73400383 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -14,8 +14,8 @@ use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use adt; -use base; -use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef}; +use builder::Builder; +use common::{self, CrateContext, C_uint, C_undef}; use consts; use machine; use type_of::type_of; @@ -44,16 +44,6 @@ impl<'tcx> LvalueRef<'tcx> { LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } - pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>, - ty: Ty<'tcx>, 
- name: &str) - -> LvalueRef<'tcx> - { - assert!(!ty.has_erasable_regions()); - let lltemp = base::alloc_ty(bcx, ty, name); - LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) - } - pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { @@ -69,13 +59,13 @@ impl<'tcx> LvalueRef<'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_lvalue(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> LvalueRef<'tcx> { debug!("trans_lvalue(lvalue={:?})", lvalue); let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { @@ -177,7 +167,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llindex = C_uint(bcx.ccx, from); let llbase = project_index(llindex); - let base_ty = tr_base.ty.to_ty(bcx.tcx()); + let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx()); match base_ty.sty { ty::TyArray(..) => { // must cast the lvalue pointer type to the new @@ -214,7 +204,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Perform an action using the given Lvalue. // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot // is created first, then used as an operand to update the Lvalue. 
- pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + pub fn with_lvalue_ref(&mut self, bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, f: F) -> U where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U { @@ -223,9 +213,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(lvalue) => f(self, lvalue), LocalRef::Operand(None) => { let lvalue_ty = self.monomorphized_lvalue_ty(lvalue); - let lvalue = LvalueRef::alloca(bcx, - lvalue_ty, - "lvalue_temp"); + assert!(!lvalue_ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp"); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty)); let ret = f(self, lvalue); let op = self.trans_load(bcx, lvalue.llval, lvalue_ty); self.locals[index] = LocalRef::Operand(Some(op)); @@ -254,18 +244,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { /// than we are. /// /// nmatsakis: is this still necessary? Not sure. - fn prepare_index(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, - llindex: ValueRef) - -> ValueRef - { - let ccx = bcx.ccx; + fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); + let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type()); if index_size < int_size { - bcx.zext(llindex, ccx.int_type()) + bcx.zext(llindex, bcx.ccx.int_type()) } else if index_size > int_size { - bcx.trunc(llindex, ccx.int_type()) + bcx.trunc(llindex, bcx.ccx.int_type()) } else { llindex } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index dc8c6e89df9a4..94427ee9aefa6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -19,7 +19,8 @@ use rustc::infer::TransNormalize; use rustc::ty::TypeFoldable; use session::config::FullDebugInfo; use base; -use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; +use 
builder::Builder; +use common::{self, CrateContext, FunctionContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; use abi::FnType; @@ -106,7 +107,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) } - pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { + pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) { let (scope, span) = self.debug_loc(source_info); debuginfo::set_source_location(&self.debug_context, bcx, scope, span); } @@ -258,7 +259,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // User variable let source_info = decl.source_info.unwrap(); let debug_scope = mircx.scopes[source_info.scope]; - let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; + let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo; if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); @@ -266,7 +267,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( } debug!("alloc: {:?} ({}) -> lvalue", local, name); - let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &name.as_str()); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)); if dbg { let (scope, span) = mircx.debug_loc(source_info); declare_local(&bcx, &mircx.debug_context, name, ty, scope, @@ -282,7 +285,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); - LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local)); + LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))) } 
else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the @@ -347,20 +352,20 @@ pub fn trans_mir<'a, 'tcx: 'a>( /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect. -fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>, scopes: &IndexVec, lvalue_locals: &BitVector) -> Vec> { let mir = mircx.mir; - let fcx = bcx.fcx(); - let tcx = bcx.tcx(); + let fcx = mircx.fcx; + let tcx = bcx.ccx.tcx(); let mut idx = 0; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; - let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { + let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo { Some(arg_scope.scope_metadata) } else { None @@ -381,7 +386,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, _ => bug!("spread argument isn't a tuple?!") }; - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); let arg = &mircx.fn_ty.args[idx]; @@ -420,7 +425,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let arg = &mircx.fn_ty.args[idx]; idx += 1; - let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { + let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up, unless // we emit extra-debug-info, which requires local allocas :(. 
@@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index a4af5f9e22cc4..c6eedc0d2efda 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -14,7 +14,8 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use value::Value; use type_of; use type_::Type; @@ -85,8 +86,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a Pair, we return an /// Immediate aggregate with the two values. - pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. let llty = type_of::type_of(bcx.ccx, self.ty); @@ -107,8 +107,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a pair in an Immediate, /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. 
if common::type_is_imm_pair(bcx.ccx, self.ty) { @@ -136,7 +135,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_load(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, llval: ValueRef, ty: Ty<'tcx>) -> OperandRef<'tcx> @@ -165,7 +164,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_consume(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> OperandRef<'tcx> { @@ -212,12 +211,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // for most lvalues, to consume them we just load them // out from their home let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); + let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx()); self.trans_load(bcx, tr_lvalue.llval, ty) } pub fn trans_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx> { @@ -242,7 +241,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn store_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lldest: ValueRef, operand: OperandRef<'tcx>, align: Option) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index dac81468be950..00dd49d84e31c 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -17,8 +17,9 @@ use middle::lang_items::ExchangeMallocFnLangItem; use asm; use base; +use builder::Builder; use callee::Callee; -use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; +use common::{self, val_ty, C_bool, C_null, C_uint}; use common::{C_integral}; use adt; use machine; @@ -35,10 +36,10 @@ use super::lvalue::{LvalueRef}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, dest: LvalueRef<'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> BlockAndBuilder<'a, 'tcx> + -> Builder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, 
rvalue={:?})", Value(dest.llval), rvalue); @@ -79,7 +80,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // index into the struct, and this case isn't // important enough for it. debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); + let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, lltemp, operand.ty); lltemp } @@ -91,7 +92,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Repeat(ref elem, ref count) => { let tr_elem = self.trans_operand(&bcx, elem); - let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); + let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { @@ -103,7 +104,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); - let dest_ty = dest.ty.to_ty(bcx.tcx()); + let dest_ty = dest.ty.to_ty(bcx.ccx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); @@ -119,7 +120,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }, _ => { // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); + let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx())); let translation = if let Layout::Univariant { ref variant, .. 
} = *layout { Some(&variant.memory_index) } else { @@ -149,7 +150,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { let lvalue = self.trans_lvalue(&bcx, output); - (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) + (lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx())) }).collect(); let input_vals = inputs.iter().map(|input| { @@ -170,9 +171,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_rvalue_operand(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) + -> (Builder<'a, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); @@ -344,9 +345,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Ref(_, bk, ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let ref_ty = bcx.tcx().mk_ref( - bcx.tcx().mk_region(ty::ReErased), + let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx()); + let ref_ty = bcx.ccx.tcx().mk_ref( + bcx.ccx.tcx().mk_region(ty::ReErased), ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } ); @@ -371,7 +372,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let operand = OperandRef { val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), - ty: bcx.tcx().types.usize, + ty: bcx.ccx.tcx().types.usize, }; (bcx, operand) } @@ -398,7 +399,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), + ty: op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty), }; (bcx, operand) } @@ -408,8 +409,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let result = self.trans_scalar_checked_binop(&bcx, op, lhs.immediate(), rhs.immediate(), lhs.ty); - let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); - let operand_ty = bcx.tcx().intern_tup(&[val_ty, 
bcx.tcx().types.bool]); + let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty); + let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]); let operand = OperandRef { val: result, ty: operand_ty @@ -443,16 +444,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let align = type_of::align_of(bcx.ccx, content_ty); let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); - let box_ty = bcx.tcx().mk_box(content_ty); + let box_ty = bcx.ccx.tcx().mk_box(content_ty); // Allocate space: - let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { + let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); } }; - let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])) + let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[])) .reify(bcx.ccx); let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); @@ -477,7 +478,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -552,7 +553,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_fat_ptr_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, @@ -599,7 +600,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_checked_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -617,7 +618,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // will only succeed if both operands are constant. // This is necessary to determine when an overflow Assert // will always panic at runtime, and produce a warning. 
- if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { + if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) { return OperandValue::Pair(val, C_bool(bcx.ccx, of)); } @@ -681,12 +682,12 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef { +fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; - let tcx = bcx.tcx(); + let tcx = bcx.ccx.tcx(); let new_sty = match ty.sty { TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index cc85f68c197ec..48fc9720e4b83 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -11,7 +11,8 @@ use rustc::mir; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use super::MirContext; use super::LocalRef; @@ -20,9 +21,9 @@ use super::super::disr::Disr; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, statement: &mir::Statement<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); self.set_debug_loc(&bcx, statement.source_info); @@ -77,10 +78,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_storage_liveness(&self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, intrinsic: base::Lifetime) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Lvalue(tr_lval) = self.locals[index] { intrinsic.call(&bcx, tr_lval.llval); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index c09726fda0810..f6fc4637e1c06 100644 --- 
a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -9,28 +9,29 @@ // except according to those terms. use llvm; +use builder::Builder; use llvm::ValueRef; use common::*; use rustc::ty::Ty; pub fn slice_for_each<'a, 'tcx, F>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F -) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { +) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx, unit_ty); - let add = |bcx: &BlockAndBuilder, a, b| if zst { + let add = |bcx: &Builder, a, b| if zst { bcx.add(a, b) } else { bcx.inbounds_gep(a, &[b]) }; - let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); - let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); - let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); + let body_bcx = bcx.build_new_block("slice_loop_body"); + let next_bcx = bcx.build_new_block("slice_loop_next"); + let header_bcx = bcx.build_new_block("slice_loop_header"); let start = if zst { C_uint(bcx.ccx, 0usize) diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 9141b7245e35a..6135f49eb711b 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -24,8 +24,8 @@ pub struct Bytes { // dependent alignment #[no_mangle] pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: %arg1 = alloca [4 x i8] // CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca [4 x i8] // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* @@ -38,8 +38,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // dependent alignment #[no_mangle] pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: %arg1 = alloca %Bytes // CHECK: 
[[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca %Bytes // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* From 937e8da349e60d8e56e1ebb3f03a3e394bf3c9eb Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 1 Jan 2017 00:42:09 -0700 Subject: [PATCH 02/18] Purge FunctionContext --- src/librustc_trans/base.rs | 16 ++--- src/librustc_trans/builder.rs | 4 ++ src/librustc_trans/callee.rs | 20 +++--- src/librustc_trans/cleanup.rs | 18 +++-- src/librustc_trans/common.rs | 68 +------------------ .../debuginfo/create_scope_map.rs | 6 +- src/librustc_trans/debuginfo/doc.rs | 2 +- src/librustc_trans/glue.rs | 7 +- src/librustc_trans/intrinsic.rs | 27 ++++---- src/librustc_trans/meth.rs | 5 +- src/librustc_trans/mir/block.rs | 12 ++-- src/librustc_trans/mir/mod.rs | 42 +++++++----- 12 files changed, 82 insertions(+), 145 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index cb45f9c051264..cdd909477fdc9 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -54,7 +54,7 @@ use callee::{Callee}; use common::{C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_struct_in_context, C_u64, C_undef}; -use common::{CrateContext, FunctionContext}; +use common::CrateContext; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; use common; @@ -590,18 +590,17 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let fcx = FunctionContext::new(ccx, lldecl); let mir = ccx.tcx().item_mir(instance.def); - mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi); + mir::trans_mir(ccx, lldecl, fn_ty, &mir, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, disr: Disr, - llfndecl: ValueRef) { - 
attributes::inline(llfndecl, attributes::InlineAttr::Hint); - attributes::set_frame_pointer_elimination(ccx, llfndecl); + llfn: ValueRef) { + attributes::inline(llfn, attributes::InlineAttr::Hint); + attributes::set_frame_pointer_elimination(ccx, llfn); let ctor_ty = ccx.tcx().item_type(def_id); let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty); @@ -609,13 +608,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let fcx = FunctionContext::new(ccx, llfndecl); - let bcx = fcx.get_entry_block(); + let bcx = Builder::entry_block(ccx, llfn); if !fn_ty.ret.is_ignore() { // But if there are no nested returns, we skip the indirection // and have a single retslot let dest = if fn_ty.ret.is_indirect() { - get_param(fcx.llfn, 0) + get_param(llfn, 0) } else { // We create an alloca to hold a pointer of type `ret.original_ty` // which will hold the pointer to the right alloca which has the diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 6ea048d7ed3bf..610446d316d8e 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -50,6 +50,10 @@ fn noname() -> *const c_char { } impl<'a, 'tcx> Builder<'a, 'tcx> { + pub fn entry_block(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef) -> Self { + Builder::new_block(ccx, llfn, "entry-block") + } + pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self { let builder = Builder::with_ccx(ccx); let llbb = unsafe { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index aabd3083ff53c..e73bfd73e9b07 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -23,9 +23,9 @@ use rustc::traits; use abi::{Abi, FnType}; use attributes; use base; -use common::{ - self, CrateContext, FunctionContext, SharedCrateContext -}; +use 
builder::Builder; +use common::{self, CrateContext, SharedCrateContext}; +use cleanup::CleanupScope; use adt::MaybeSizedValue; use consts; use declare; @@ -329,8 +329,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( attributes::set_frame_pointer_elimination(ccx, lloncefn); let orig_fn_ty = fn_ty; - let fcx = FunctionContext::new(ccx, lloncefn); - let mut bcx = fcx.get_entry_block(); + let mut bcx = Builder::entry_block(ccx, lloncefn); let callee = Callee { data: Fn(llreffn), @@ -339,7 +338,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // the first argument (`self`) will be the (by value) closure env. - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(lloncefn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let self_idx = fn_ty.ret.is_indirect() as usize; @@ -364,7 +363,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. 
- let self_scope = fcx.schedule_drop_mem(&bcx, MaybeSizedValue::sized(llenv), closure_ty); + let self_scope = CleanupScope::schedule_drop_mem( + &bcx, MaybeSizedValue::sized(llenv), closure_ty + ); let llfn = callee.reify(bcx.ccx); let llret; @@ -488,10 +489,9 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let fcx = FunctionContext::new(ccx, llfn); - let bcx = fcx.get_entry_block(); + let bcx = Builder::entry_block(ccx, llfn); - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(llfn); let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize); let llfnpointer = llfnpointer.unwrap_or_else(|| { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index bb8faefffecd4..b41b26e1d06f4 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -22,7 +22,7 @@ use llvm::BasicBlockRef; use base; use adt::MaybeSizedValue; use builder::Builder; -use common::{FunctionContext, Funclet}; +use common::Funclet; use glue; use type_::Type; use rustc::ty::Ty; @@ -93,12 +93,12 @@ impl<'tcx> DropValue<'tcx> { } } -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { +impl<'a, 'tcx> CleanupScope<'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` pub fn schedule_drop_mem( - &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> ) -> CleanupScope<'tcx> { - if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { val: val, ty: ty, @@ -114,11 +114,11 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. 
pub fn schedule_drop_adt_contents( - &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> ) -> CleanupScope<'tcx> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. - if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { val: val, @@ -128,10 +128,8 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { CleanupScope::new(bcx, drop) } -} -impl<'tcx> CleanupScope<'tcx> { - fn new<'a>(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { + fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { cleanup: Some(drop_val), landing_pad: if !bcx.ccx.sess().no_landing_pads() { @@ -149,7 +147,7 @@ impl<'tcx> CleanupScope<'tcx> { } } - pub fn trans<'a>(self, bcx: &'a Builder<'a, 'tcx>) { + pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) { if let Some(cleanup) = self.cleanup { cleanup.trans(None, &bcx); } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 09be09a2b0aec..8dcb835350b37 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -13,7 +13,7 @@ //! Code that is useful in various trans modules. use llvm; -use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; +use llvm::{ValueRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; @@ -36,7 +36,6 @@ use rustc::hir; use libc::{c_uint, c_char}; use std::borrow::Cow; use std::iter; -use std::ffi::CString; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; @@ -219,71 +218,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> { } } -// Function context. Every LLVM function we create will have one of these. 
-pub struct FunctionContext<'a, 'tcx: 'a> { - // The ValueRef returned from a call to llvm::LLVMAddFunction; the - // address of the first instruction in the sequence of - // instructions for this function that will go in the .text - // section of the executable we're generating. - pub llfn: ValueRef, - - // A marker for the place where we want to insert the function's static - // allocas, so that LLVM will coalesce them into a single alloca call. - alloca_insert_pt: Option, - - // This function's enclosing crate context. - pub ccx: &'a CrateContext<'a, 'tcx>, -} - -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - /// Create a function context for the given function. - /// Call FunctionContext::get_entry_block for the first entry block. - pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> { - let mut fcx = FunctionContext { - llfn: llfndecl, - alloca_insert_pt: None, - ccx: ccx, - }; - - let entry_bcx = Builder::new_block(fcx.ccx, fcx.llfn, "entry-block"); - entry_bcx.position_at_start(entry_bcx.llbb()); - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in the drop of FunctionContext. 
- fcx.alloca_insert_pt = Some(entry_bcx.load(C_null(Type::i8p(ccx)))); - - fcx - } - - pub fn new_block(&self, name: &str) -> BasicBlockRef { - unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMAppendBasicBlockInContext( - self.ccx.llcx(), - self.llfn, - name.as_ptr() - ) - } - } - - pub fn build_new_block(&self, name: &str) -> Builder<'a, 'tcx> { - Builder::new_block(self.ccx, self.llfn, name) - } - - pub fn get_entry_block(&'a self) -> Builder<'a, 'tcx> { - let builder = Builder::with_ccx(self.ccx); - builder.position_at_end(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) }); - builder - } -} - -impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { - fn drop(&mut self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap()); - } - } -} - /// A structure representing an active landing pad for the duration of a basic /// block. /// diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index f5a8eeacf38ad..c6f8ba7b6dc78 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -14,7 +14,7 @@ use super::utils::{DIB, span_start}; use llvm; use llvm::debuginfo::{DIScope, DISubprogram}; -use common::{CrateContext, FunctionContext}; +use common::CrateContext; use rustc::mir::{Mir, VisibilityScope}; use libc::c_uint; @@ -44,7 +44,7 @@ impl MirDebugScope { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. -pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext) +pub fn create_mir_scopes(ccx: &CrateContext, mir: &Mir, debug_context: &FunctionDebugContext) -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), @@ -71,7 +71,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &Funct // Instantiate all scopes. 
for idx in 0..mir.visibility_scopes.len() { let scope = VisibilityScope::new(idx); - make_mir_scope(fcx.ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes); + make_mir_scope(ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes); } scopes diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs index bcf5eb9920076..7a739071506db 100644 --- a/src/librustc_trans/debuginfo/doc.rs +++ b/src/librustc_trans/debuginfo/doc.rs @@ -45,7 +45,7 @@ //! //! All private state used by the module is stored within either the //! CrateDebugContext struct (owned by the CrateContext) or the -//! FunctionDebugContext (owned by the FunctionContext). +//! FunctionDebugContext (owned by the MirContext). //! //! This file consists of three conceptual sections: //! 1. The public interface of the module diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index c27576f9016ae..d055e33919d10 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -25,6 +25,7 @@ use rustc::ty::subst::Kind; use adt::{self, MaybeSizedValue}; use base::*; use callee::Callee; +use cleanup::CleanupScope; use common::*; use machine::*; use monomorphize; @@ -34,7 +35,6 @@ use type_of::{type_of, sizing_type_of, align_of}; use type_::Type; use value::Value; use Disr; -use cleanup::CleanupScope; use builder::Builder; use syntax_pos::DUMMY_SP; @@ -174,8 +174,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty())); let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let fcx = FunctionContext::new(ccx, llfn); - let mut bcx = fcx.get_entry_block(); + let mut bcx = Builder::entry_block(ccx, llfn); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a @@ -246,7 +245,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // Issue 
#23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. let contents_scope = if !shallow_drop { - fcx.schedule_drop_adt_contents(&bcx, ptr, t) + CleanupScope::schedule_drop_adt_contents(&bcx, ptr, t) } else { CleanupScope::noop() }; diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index af3050fef0d68..7d094b89a059b 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -89,7 +89,6 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, - fcx: &FunctionContext, callee_ty: Ty<'tcx>, fn_ty: &FnType, llargs: &[ValueRef], @@ -127,7 +126,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, fcx, llargs[0], llargs[1], llargs[2], llresult); + try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } "breakpoint" => { @@ -689,7 +688,7 @@ fn memset_intrinsic<'a, 'tcx>( fn try_intrinsic<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, - fcx: &FunctionContext, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, @@ -701,7 +700,7 @@ fn try_intrinsic<'a, 'tcx>( } else if wants_msvc_seh(bcx.sess()) { trans_msvc_try(bcx, fcx, func, data, local_ptr, dest); } else { - trans_gnu_try(bcx, fcx, func, data, local_ptr, dest); + trans_gnu_try(bcx, ccx, func, data, local_ptr, dest); } } @@ -713,12 +712,12 @@ fn try_intrinsic<'a, 'tcx>( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, - fcx: &FunctionContext, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(fcx, &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; bcx.set_personality_fn(bcx.ccx.eh_personality()); @@ -817,12 +816,12 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, - fcx: &FunctionContext, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(fcx, &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; // Translates the shims described above: @@ -874,13 +873,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // Helper function to give a Block to a closure to translate a shim function. // This is currently primarily used for the `try` intrinsic functions above. -fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, inputs: Vec>, output: Ty<'tcx>, trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { @@ -889,8 +887,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let fcx = FunctionContext::new(ccx, llfn); - trans(fcx.get_entry_block()); + let bcx = Builder::entry_block(ccx, llfn); + trans(bcx); llfn } @@ -898,10 +896,9 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // catch exceptions. // // This function is only generated once and is then cached. 
-fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { return llfn; } @@ -915,7 +912,7 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(tcx.mk_fn_sig(iter::once(i8p), tcx.mk_nil(), false)), })); let output = tcx.types.i32; - let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); + let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); ccx.rust_try_fn().set(Some(rust_try)); return rust_try } diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 1765d6e73b32c..e83b79f11f4dc 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -76,10 +76,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let fcx = FunctionContext::new(ccx, llfn); - let bcx = fcx.get_entry_block(); + let bcx = Builder::entry_block(ccx, llfn); - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(llfn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index a818694267b90..7f720f723b8d9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -302,7 +302,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Create the failure block and the conditional branch to it. 
let lltarget = llblock(self, &bcx, target); - let panic_block = self.fcx.build_new_block("panic"); + let panic_block = bcx.build_new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -546,7 +546,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bug!("Cannot use direct operand with an intrinsic call") }; - trans_intrinsic_call(&bcx, self.fcx, callee.ty, &fn_ty, &llargs, dest, + trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -793,13 +793,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let target = self.build_block(target_bb); - let bcx = self.fcx.build_new_block("cleanup"); + let bcx = target.build_new_block("cleanup"); self.landing_pads[target_bb] = Some(bcx.llbb()); let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); bcx.set_cleanup(llretval); let slot = self.get_personality_slot(&bcx); bcx.store(llretval, slot, None); @@ -809,7 +809,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.build_new_block("unreachable"); + let bl = self.build_block(mir::START_BLOCK).build_new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() @@ -817,7 +817,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { - let builder = Builder::with_ccx(self.fcx.ccx); + let builder = Builder::with_ccx(self.ccx); builder.position_at_end(self.blocks[bb]); builder } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 94427ee9aefa6..b7b3663b85a38 100644 --- a/src/librustc_trans/mir/mod.rs +++ 
b/src/librustc_trans/mir/mod.rs @@ -20,7 +20,7 @@ use rustc::ty::TypeFoldable; use session::config::FullDebugInfo; use base; use builder::Builder; -use common::{self, CrateContext, FunctionContext, C_null, Funclet}; +use common::{self, CrateContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; use abi::FnType; @@ -31,6 +31,7 @@ use syntax::symbol::keywords; use syntax::abi::Abi; use std::iter; +use std::ffi::CString; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -49,7 +50,7 @@ pub struct MirContext<'a, 'tcx:'a> { debug_context: debuginfo::FunctionDebugContext, - fcx: &'a common::FunctionContext<'a, 'tcx>, + llfn: ValueRef, ccx: &'a CrateContext<'a, 'tcx>, @@ -199,7 +200,8 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'a, 'tcx: 'a>( - fcx: &'a FunctionContext<'a, 'tcx>, + ccx: &'a CrateContext<'a, 'tcx>, + llfn: ValueRef, fn_ty: FnType, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, @@ -208,29 +210,36 @@ pub fn trans_mir<'a, 'tcx: 'a>( ) { debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir); - let bcx = fcx.get_entry_block(); + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfn, mir); + let bcx = Builder::entry_block(ccx, llfn); let cleanup_kinds = analyze::cleanup_kinds(&mir); // Allocate a `Block` for every basic block let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { - if bb == mir::START_BLOCK { - fcx.new_block("start") + let name = if bb == mir::START_BLOCK { + CString::new("start").unwrap() } else { - fcx.new_block(&format!("{:?}", bb)) + CString::new(format!("{:?}", bb)).unwrap() + }; + unsafe { + llvm::LLVMAppendBasicBlockInContext( + ccx.llcx(), + llfn, + name.as_ptr() + ) } }).collect(); // Compute 
debuginfo scopes from MIR scopes. - let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context); + let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context); let mut mircx = MirContext { mir: mir, - fcx: fcx, + llfn: llfn, fn_ty: fn_ty, - ccx: fcx.ccx, + ccx: ccx, llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, @@ -281,7 +290,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Temporary or return pointer if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); - let llretptr = llvm::get_param(fcx.llfn, 0); + let llretptr = llvm::get_param(llfn, 0); LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); @@ -319,7 +328,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( if let CleanupKind::Funclet = *cleanup_kind { let bcx = mircx.build_block(bb); bcx.set_personality_fn(mircx.ccx.eh_personality()); - if base::wants_msvc_seh(fcx.ccx.sess()) { + if base::wants_msvc_seh(ccx.sess()) { return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); } } @@ -358,7 +367,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, lvalue_locals: &BitVector) -> Vec> { let mir = mircx.mir; - let fcx = mircx.fcx; let tcx = bcx.ccx.tcx(); let mut idx = 0; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; @@ -433,7 +441,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; llarg } else if !lvalue_locals.contains(local.index()) && @@ -449,13 +457,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let val = if 
common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &mircx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); - let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; OperandValue::Pair(llarg, llmeta) } else { From f67e7d6b4ab61acc7de258fd555f79c55b433cbe Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 1 Jan 2017 01:29:23 -0700 Subject: [PATCH 03/18] Add method, new_block, to MirContext for block construction. This makes a slow transition to block construction happening only from MirContext easier. --- src/librustc_trans/mir/block.rs | 42 ++++++++++++++++++--------------- src/librustc_trans/mir/mod.rs | 2 +- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 7f720f723b8d9..db54bd717afcb 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -44,7 +44,7 @@ use super::operand::OperandValue::{Pair, Ref, Immediate}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock, funclets: &IndexVec>) { - let mut bcx = self.build_block(bb); + let mut bcx = self.get_builder(bb); let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); @@ -75,7 +75,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; - let llblock = |this: &mut Self, bcx: &Builder, target: mir::BasicBlock| { + let llblock = |this: &mut Self, target: mir::BasicBlock| { let lltarget = this.blocks[target]; if let Some(cp) = cleanup_pad { @@ -85,7 +85,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = bcx.build_new_block(name); + let trampoline = this.new_block(name); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -139,8 +139,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { 
mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => { let cond = self.trans_operand(&bcx, cond); - let lltrue = llblock(self, &bcx, true_bb); - let llfalse = llblock(self, &bcx, false_bb); + let lltrue = llblock(self, true_bb); + let llfalse = llblock(self, false_bb); bcx.cond_br(cond.immediate(), lltrue, llfalse); } @@ -159,7 +159,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // code. This is especially helpful in cases like an if-let on a huge enum. // Note: This optimization is only valid for exhaustive matches. Some((&&bb, &c)) if c > targets.len() / 2 => { - (Some(bb), llblock(self, &bcx, bb)) + (Some(bb), llblock(self, bb)) } // We're generating an exhaustive switch, so the else branch // can't be hit. Branching to an unreachable instruction @@ -170,7 +170,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { assert_eq!(adt_def.variants.len(), targets.len()); for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { if default_bb != Some(target) { - let llbb = llblock(self, &bcx, target); + let llbb = llblock(self, target); let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); bcx.add_case(switch, llval, llbb) } @@ -181,10 +181,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = base::to_immediate(&bcx, discr, switch_ty); - let switch = bcx.switch(discr, llblock(self, &bcx, *otherwise), values.len()); + let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty); - let llbb = llblock(self, &bcx, *target); + let llbb = llblock(self, *target); bcx.add_case(switch, val.llval, llbb) } } @@ -261,7 +261,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { drop_fn, args, self.blocks[target], - llblock(self, &bcx, unwind), + llblock(self, unwind), cleanup_bundle ); } else { @@ -301,8 +301,8 @@ 
impl<'a, 'tcx> MirContext<'a, 'tcx> { let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); // Create the failure block and the conditional branch to it. - let lltarget = llblock(self, &bcx, target); - let panic_block = bcx.build_new_block("panic"); + let lltarget = llblock(self, target); + let panic_block = self.new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -382,7 +382,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.invoke(llfn, &args, self.unreachable_block(), - llblock(self, &bcx, unwind), + llblock(self, unwind), cleanup_bundle); } else { bcx.call(llfn, &args, cleanup_bundle); @@ -580,12 +580,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let invokeret = bcx.invoke(fn_ptr, &llargs, ret_bcx, - llblock(self, &bcx, cleanup), + llblock(self, cleanup), cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); if let Some((_, target)) = *destination { - let ret_bcx = self.build_block(target); + let ret_bcx = self.get_builder(target); ret_bcx.position_at_start(ret_bcx.llbb()); self.set_debug_loc(&ret_bcx, terminator.source_info); let op = OperandRef { @@ -791,9 +791,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return self.blocks[target_bb]; } - let target = self.build_block(target_bb); + let target = self.get_builder(target_bb); - let bcx = target.build_new_block("cleanup"); + let bcx = self.new_block("cleanup"); self.landing_pads[target_bb] = Some(bcx.llbb()); let ccx = bcx.ccx; @@ -809,14 +809,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { - let bl = self.build_block(mir::START_BLOCK).build_new_block("unreachable"); + let bl = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() }) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { + pub fn new_block(&self, name: &str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn, name) + } + + pub 
fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { let builder = Builder::with_ccx(self.ccx); builder.position_at_end(self.blocks[bb]); builder diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index b7b3663b85a38..2090c8eaab929 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -326,7 +326,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( let funclets: IndexVec> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { - let bcx = mircx.build_block(bb); + let bcx = mircx.get_builder(bb); bcx.set_personality_fn(mircx.ccx.eh_personality()); if base::wants_msvc_seh(ccx.sess()) { return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); From 37dd9f6c7b488506c9482c6a296122dce8a3c207 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 1 Jan 2017 08:46:34 -0700 Subject: [PATCH 04/18] Add Builder::sess and Builder::tcx methods --- src/librustc_trans/adt.rs | 6 ++-- src/librustc_trans/asm.rs | 2 +- src/librustc_trans/base.rs | 4 +-- src/librustc_trans/builder.rs | 11 ++++++- src/librustc_trans/cleanup.rs | 6 ++-- src/librustc_trans/debuginfo/source_loc.rs | 2 +- src/librustc_trans/glue.rs | 26 ++++++++--------- src/librustc_trans/intrinsic.rs | 6 ++-- src/librustc_trans/mir/block.rs | 28 +++++++++--------- src/librustc_trans/mir/lvalue.rs | 2 +- src/librustc_trans/mir/mod.rs | 8 ++--- src/librustc_trans/mir/operand.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 34 +++++++++++----------- 13 files changed, 73 insertions(+), 64 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 29a41acd0e557..365f86c024500 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -359,7 +359,7 @@ pub fn trans_get_discr<'a, 'tcx>( layout::RawNullablePointer { nndiscr, .. 
} => { let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; let llptrty = type_of::sizing_type_of(bcx.ccx, - monomorphize::field_ty(bcx.ccx.tcx(), substs, + monomorphize::field_ty(bcx.tcx(), substs, &def.variants[nndiscr as usize].fields[0])); bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty)) } @@ -486,7 +486,7 @@ pub fn trans_set_discr<'a, 'tcx>( } fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { - bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64" + bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { @@ -524,7 +524,7 @@ pub fn trans_field_ptr<'a, 'tcx>( } layout::General { discr: d, ref variants, .. } => { let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); - fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false)); + fields.insert(0, d.to_ty(&bcx.tcx(), false)); struct_field_ptr(bcx, &variants[discr.0 as usize], &fields, val, ix + 1, true) diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 0912e54bf537e..c95d414701876 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -62,7 +62,7 @@ pub fn trans_inline_asm<'a, 'tcx>( // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] { + let arch_clobbers = match &bcx.sess().target.target.arch[..] 
{ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], _ => Vec::new() }; diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index cdd909477fdc9..f87b44604c7b8 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -272,10 +272,10 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert_eq!(def_a, def_b); let src_fields = def_a.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f) + monomorphize::field_ty(bcx.tcx(), substs_a, f) }); let dst_fields = def_b.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f) + monomorphize::field_ty(bcx.tcx(), substs_b, f) }); let src = adt::MaybeSizedValue::sized(src); diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 610446d316d8e..6112e29f72f4d 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -19,7 +19,8 @@ use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; -use rustc::ty::{Ty, TypeFoldable}; +use rustc::ty::{Ty, TyCtxt, TypeFoldable}; +use rustc::session::Session; use type_of; use std::borrow::Cow; @@ -93,6 +94,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { builder } + pub fn sess(&self) -> &Session { + self.ccx.sess() + } + + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.ccx.tcx() + } + pub fn llfn(&self) -> ValueRef { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index b41b26e1d06f4..8b296e9ad2988 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -59,7 +59,7 @@ impl<'tcx> DropValue<'tcx> { let llpersonality = bcx.ccx.eh_personality(); bcx.set_personality_fn(llpersonality); - if base::wants_msvc_seh(bcx.ccx.sess()) { + if base::wants_msvc_seh(bcx.sess()) { let pad = bcx.cleanup_pad(None, &[]); let funclet = Some(Funclet::new(pad)); self.trans(funclet.as_ref(), &bcx); @@ -80,7 
+80,7 @@ impl<'tcx> DropValue<'tcx> { // Insert cleanup instructions into the cleanup block self.trans(None, &bcx); - if !bcx.ccx.sess().target.target.options.custom_unwind_resume { + if !bcx.sess().target.target.options.custom_unwind_resume { bcx.resume(llretval); } else { let exc_ptr = bcx.extract_value(llretval, 0); @@ -132,7 +132,7 @@ impl<'a, 'tcx> CleanupScope<'tcx> { fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { cleanup: Some(drop_val), - landing_pad: if !bcx.ccx.sess().no_landing_pads() { + landing_pad: if !bcx.sess().no_landing_pads() { Some(drop_val.get_landing_pad(bcx)) } else { None diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index e02c8be19a2f4..e99e26261a3a1 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -38,7 +38,7 @@ pub fn set_source_location( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span)); + debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span)); let loc = span_start(builder.ccx, span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index d055e33919d10..8b7df0128b00b 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -44,8 +44,8 @@ pub fn trans_exchange_free_ty<'a, 'tcx>( ptr: MaybeSizedValue, content_ty: Ty<'tcx> ) { - let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem); - let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty))); + let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem); + let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty))); let callee = Callee::def(bcx.ccx, def_id, substs); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); @@ -232,7 +232,7 @@ pub fn 
implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi } ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { let shallow_drop = def.is_union(); - let tcx = bcx.ccx.tcx(); + let tcx = bcx.tcx(); let def = t.ty_adt_def().unwrap(); @@ -330,7 +330,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Recurse to get the size of the dynamically sized field (must be // the last field). let last_field = def.struct_variant().fields.last().unwrap(); - let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field); + let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -382,7 +382,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf (bcx.load(size_ptr), bcx.load(align_ptr)) } ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(bcx.ccx.tcx()); + let unit_ty = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. 
let llunit_ty = sizing_type_of(bcx.ccx, unit_ty); @@ -405,7 +405,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, av: adt::MaybeSizedValue, variant: &'tcx ty::VariantDef, substs: &Substs<'tcx>) { - let tcx = cx.ccx.tcx(); + let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); @@ -416,7 +416,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, let mut cx = cx; match t.sty { ty::TyClosure(def_id, substs) => { - for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() { + for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty); } @@ -424,12 +424,12 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ty::TyArray(_, n) => { let base = get_dataptr(&cx, ptr.value); let len = C_uint(cx.ccx, n); - let unit_ty = t.sequence_element_type(cx.ccx.tcx()); + let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); } ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(cx.ccx.tcx()); + let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta, |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); } @@ -441,7 +441,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None); + let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i); let ptr = if cx.ccx.shared().type_is_sized(field_ty) { 
@@ -469,7 +469,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { - let tcx = cx.ccx.tcx(); + let tcx = cx.tcx(); drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize); // Create a fall-through basic block for the "else" case of @@ -501,13 +501,13 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, } cx = next_cx; } - _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => cx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), } } }, _ => { - cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) + cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) } } return cx; diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7d094b89a059b..41155935a6353 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -694,7 +694,7 @@ fn try_intrinsic<'a, 'tcx>( local_ptr: ValueRef, dest: ValueRef, ) { - if bcx.ccx.sess().no_landing_pads() { + if bcx.sess().no_landing_pads() { bcx.call(func, &[data], None); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None); } else if wants_msvc_seh(bcx.sess()) { @@ -937,7 +937,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bcx.ccx.sess(), span, + bcx.sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); @@ -959,7 +959,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( - let tcx = bcx.ccx.tcx(); + let tcx = bcx.tcx(); let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig()); let arg_tys = sig.inputs(); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index db54bd717afcb..ae0d847072a59 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -122,7 +122,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ps = self.get_personality_slot(&bcx); let lp = 
bcx.load(ps); Lifetime::End.call(&bcx, ps); - if !bcx.ccx.sess().target.target.options.custom_unwind_resume { + if !bcx.sess().target.target.options.custom_unwind_resume { bcx.resume(lp); } else { let exc_ptr = bcx.extract_value(lp, 0); @@ -146,7 +146,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); - let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx()); + let ty = discr_lvalue.ty.to_ty(bcx.tcx()); let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true); let mut bb_hist = FxHashMap(); @@ -203,7 +203,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(tr_lvalue) => { OperandRef { val: Ref(tr_lvalue.llval), - ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx()) + ty: tr_lvalue.ty.to_ty(bcx.tcx()) } } }; @@ -233,7 +233,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Drop { ref location, target, unwind } => { - let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx()); + let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); let ty = self.monomorphize(&ty); // Double check for necessity to drop @@ -314,7 +314,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { self.set_debug_loc(&bcx, terminator.source_info); // Get the location information. 
- let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo); + let loc = bcx.sess().codemap().lookup_char_pos(span.lo); let filename = Symbol::intern(&loc.file.name).as_str(); let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); @@ -364,15 +364,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if const_cond == Some(!expected) { if let Some(err) = const_err { let err = ConstEvalErr{ span: span, kind: err }; - let mut diag = bcx.ccx.tcx().sess.struct_span_warn( + let mut diag = bcx.tcx().sess.struct_span_warn( span, "this expression will panic at run-time"); - note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag); + note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); diag.emit(); } } // Obtain the panic entry point. - let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item); + let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); let callee = Callee::def(bcx.ccx, def_id, bcx.ccx.empty_substs_for_def_id(def_id)); let llfn = callee.reify(bcx.ccx); @@ -411,12 +411,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => bug!("{} is not callable", callee.ty) }; - let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig); + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig); // Handle intrinsics old trans wants Expr's for, ourselves. 
let intrinsic = match (&callee.ty.sty, &callee.data) { (&ty::TyFnDef(def_id, ..), &Intrinsic) => { - Some(bcx.ccx.tcx().item_name(def_id).as_str()) + Some(bcx.tcx().item_name(def_id).as_str()) } _ => None }; @@ -444,7 +444,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let extra_args = &args[sig.inputs().len()..]; let extra_args = extra_args.iter().map(|op_arg| { - let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx()); + let op_ty = op_arg.ty(&self.mir, bcx.tcx()); self.monomorphize(&op_ty) }).collect::>(); let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args); @@ -635,7 +635,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let imm_op = |x| OperandRef { val: Immediate(x), // We won't be checking the type again. - ty: bcx.ccx.tcx().types.err + ty: bcx.tcx().types.err }; self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee); self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee); @@ -875,13 +875,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { - let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx())); + let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx())); let out_type_size = llbitsize_of_real(bcx.ccx, llouttype); if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. 
let f = Callee::def(bcx.ccx, def_id, substs); let ty = match f.ty.sty { - ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f), + ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), _ => f.ty }; val = OperandRef { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index b09ce73400383..5a926c7f75559 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -167,7 +167,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llindex = C_uint(bcx.ccx, from); let llbase = project_index(llindex); - let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx()); + let base_ty = tr_base.ty.to_ty(bcx.tcx()); match base_ty.sty { ty::TyArray(..) => { // must cast the lvalue pointer type to the new diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 2090c8eaab929..d9c0f047d3cb2 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -268,7 +268,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // User variable let source_info = decl.source_info.unwrap(); let debug_scope = mircx.scopes[source_info.scope]; - let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo; + let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); @@ -367,13 +367,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, lvalue_locals: &BitVector) -> Vec> { let mir = mircx.mir; - let tcx = bcx.ccx.tcx(); + let tcx = bcx.tcx(); let mut idx = 0; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. 
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; - let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo { + let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { Some(arg_scope.scope_metadata) } else { None @@ -433,7 +433,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let arg = &mircx.fn_ty.args[idx]; idx += 1; - let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo { + let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up, unless // we emit extra-debug-info, which requires local allocas :(. diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index c6eedc0d2efda..28a247ee612a9 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -211,7 +211,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // for most lvalues, to consume them we just load them // out from their home let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx()); + let ty = tr_lvalue.ty.to_ty(bcx.tcx()); self.trans_load(bcx, tr_lvalue.llval, ty) } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 00dd49d84e31c..a23c3d4b2e358 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -92,7 +92,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Repeat(ref elem, ref count) => { let tr_elem = self.trans_operand(&bcx, elem); - let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type); + let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { @@ -104,7 +104,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match *kind { 
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); - let dest_ty = dest.ty.to_ty(bcx.ccx.tcx()); + let dest_ty = dest.ty.to_ty(bcx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); @@ -120,7 +120,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }, _ => { // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx())); + let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); let translation = if let Layout::Univariant { ref variant, .. } = *layout { Some(&variant.memory_index) } else { @@ -150,7 +150,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { let lvalue = self.trans_lvalue(&bcx, output); - (lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx())) + (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) }).collect(); let input_vals = inputs.iter().map(|input| { @@ -345,9 +345,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Ref(_, bk, ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx()); - let ref_ty = bcx.ccx.tcx().mk_ref( - bcx.ccx.tcx().mk_region(ty::ReErased), + let ty = tr_lvalue.ty.to_ty(bcx.tcx()); + let ref_ty = bcx.tcx().mk_ref( + bcx.tcx().mk_region(ty::ReErased), ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } ); @@ -372,7 +372,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let operand = OperandRef { val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), - ty: bcx.ccx.tcx().types.usize, + ty: bcx.tcx().types.usize, }; (bcx, operand) } @@ -399,7 +399,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - ty: op.ty(bcx.ccx.tcx(), 
lhs.ty, rhs.ty), + ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), }; (bcx, operand) } @@ -409,8 +409,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let result = self.trans_scalar_checked_binop(&bcx, op, lhs.immediate(), rhs.immediate(), lhs.ty); - let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty); - let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]); + let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); + let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]); let operand = OperandRef { val: result, ty: operand_ty @@ -444,16 +444,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let align = type_of::align_of(bcx.ccx, content_ty); let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); - let box_ty = bcx.ccx.tcx().mk_box(content_ty); + let box_ty = bcx.tcx().mk_box(content_ty); // Allocate space: - let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) { + let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); } }; - let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[])) + let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])) .reify(bcx.ccx); let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); @@ -618,7 +618,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // will only succeed if both operands are constant. // This is necessary to determine when an overflow Assert // will always panic at runtime, and produce a warning. 
- if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) { + if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { return OperandValue::Pair(val, C_bool(bcx.ccx, of)); } @@ -687,7 +687,7 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef { use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; - let tcx = bcx.ccx.tcx(); + let tcx = bcx.tcx(); let new_sty = match ty.sty { TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { From 4c9995a3f9e7c9aa23fe3c1513904746c571b90b Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 1 Jan 2017 11:13:50 -0700 Subject: [PATCH 05/18] Simplify block creation in MirContext --- src/librustc_trans/mir/mod.rs | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index d9c0f047d3cb2..9e2c7797b5352 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -31,7 +31,6 @@ use syntax::symbol::keywords; use syntax::abi::Abi; use std::iter; -use std::ffi::CString; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -218,17 +217,10 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Allocate a `Block` for every basic block let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { - let name = if bb == mir::START_BLOCK { - CString::new("start").unwrap() + if bb == mir::START_BLOCK { + bcx.build_new_block("start").llbb() } else { - CString::new(format!("{:?}", bb)).unwrap() - }; - unsafe { - llvm::LLVMAppendBasicBlockInContext( - ccx.llcx(), - llfn, - name.as_ptr() - ) + bcx.build_new_block(&format!("{:?}", bb)).llbb() } }).collect(); From 8038489357b5262645760a24673597486d71c1df Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 1 Jan 2017 15:50:15 -0700 Subject: [PATCH 06/18] Use LvalueRef instead of MaybeSizedValue --- src/librustc_trans/adt.rs | 49 
++++------------ src/librustc_trans/base.rs | 14 +++-- src/librustc_trans/callee.rs | 4 +- src/librustc_trans/cleanup.rs | 29 ++++++---- src/librustc_trans/glue.rs | 95 +++++++++++++++----------------- src/librustc_trans/intrinsic.rs | 5 +- src/librustc_trans/mir/block.rs | 15 ++--- src/librustc_trans/mir/lvalue.rs | 22 +++++++- src/librustc_trans/mir/mod.rs | 4 +- src/librustc_trans/mir/rvalue.rs | 5 +- 10 files changed, 119 insertions(+), 123 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 365f86c024500..14882dd433ba7 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -48,6 +48,7 @@ use std; use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; +use mir::lvalue::LvalueRef; use common::*; use builder::Builder; use glue; @@ -64,32 +65,6 @@ pub enum BranchKind { Single } -#[derive(Copy, Clone)] -pub struct MaybeSizedValue { - pub value: ValueRef, - pub meta: ValueRef, -} - -impl MaybeSizedValue { - pub fn sized(value: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: std::ptr::null_mut() - } - } - - pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: meta - } - } - - pub fn has_meta(&self) -> bool { - !self.meta.is_null() - } -} - /// Given an enum, struct, closure, or tuple, extracts fields. /// Treats closures as a struct with one variant. /// `empty_if_no_variants` is a switch to deal with empty enums. @@ -500,11 +475,11 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { /// Access a field, at a point when the value's case is known. 
pub fn trans_field_ptr<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, - t: Ty<'tcx>, - val: MaybeSizedValue, + val: LvalueRef<'tcx>, discr: Disr, ix: usize ) -> ValueRef { + let t = val.ty.to_ty(bcx.tcx()); let l = bcx.ccx.layout_of(t); debug!("trans_field_ptr on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we @@ -520,7 +495,7 @@ pub fn trans_field_ptr<'a, 'tcx>( layout::Vector { count, .. } => { assert_eq!(discr.0, 0); assert!((ix as u64) < count); - bcx.struct_gep(val.value, ix) + bcx.struct_gep(val.llval, ix) } layout::General { discr: d, ref variants, .. } => { let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); @@ -532,7 +507,7 @@ pub fn trans_field_ptr<'a, 'tcx>( layout::UntaggedUnion { .. } => { let fields = compute_fields(bcx.ccx, t, 0, false); let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); - bcx.pointercast(val.value, ty.ptr_to()) + bcx.pointercast(val.llval, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { @@ -541,14 +516,14 @@ pub fn trans_field_ptr<'a, 'tcx>( // (e.d., Result of Either with (), as one side.) let ty = type_of::type_of(bcx.ccx, nullfields[ix]); assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); - bcx.pointercast(val.value, ty.ptr_to()) + bcx.pointercast(val.llval, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } => { let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); assert_eq!(discr.0, nndiscr); let ty = type_of::type_of(bcx.ccx, nnty); - bcx.pointercast(val.value, ty.ptr_to()) + bcx.pointercast(val.llval, ty.ptr_to()) } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { assert_eq!(discr.0, nndiscr); @@ -564,7 +539,7 @@ fn struct_field_ptr<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, st: &layout::Struct, fields: &Vec>, - val: MaybeSizedValue, + val: LvalueRef, ix: usize, needs_cast: bool ) -> ValueRef { @@ -576,9 +551,9 @@ fn struct_field_ptr<'a, 'tcx>( type_of::in_memory_type_of(ccx, fields[i]) }).collect::>(); let real_ty = Type::struct_(ccx, &fields[..], st.packed); - bcx.pointercast(val.value, real_ty.ptr_to()) + bcx.pointercast(val.llval, real_ty.ptr_to()) } else { - val.value + val.llval }; // Simple case - we can just GEP the field @@ -600,7 +575,7 @@ fn struct_field_ptr<'a, 'tcx>( } // There's no metadata available, log the case and just do the GEP. - if !val.has_meta() { + if !val.has_extra() { debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", ix, Value(ptr_val)); return bcx.struct_gep(ptr_val, ix); @@ -621,7 +596,7 @@ fn struct_field_ptr<'a, 'tcx>( // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that // the `y` field has 16-bit alignment. 
- let meta = val.meta; + let meta = val.llextra; let offset = st.offsets[ix].bytes(); diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index f87b44604c7b8..a329b01cc1608 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -47,6 +47,7 @@ use session::config::{self, NoDebugInfo}; use rustc_incremental::IncrementalHashesMap; use session::{self, DataTypeKind, Session}; use abi::{self, Abi, FnType}; +use mir::lvalue::LvalueRef; use adt; use attributes; use builder::Builder; @@ -278,8 +279,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, monomorphize::field_ty(bcx.tcx(), substs_b, f) }); - let src = adt::MaybeSizedValue::sized(src); - let dst = adt::MaybeSizedValue::sized(dst); + let src = LvalueRef::new_sized_ty(src, src_ty); + let dst = LvalueRef::new_sized_ty(dst, dst_ty); let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { @@ -287,8 +288,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, continue; } - let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); + let src_f = adt::trans_field_ptr(bcx, src, Disr(0), i); + let dst_f = adt::trans_field_ptr(bcx, dst, Disr(0), i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty, None); } else { @@ -620,11 +621,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // final ret value bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") }; - let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value + // Can return unsized value + let dest_val = LvalueRef::new_sized_ty(dest, sig.output()); let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); + let lldestptr = adt::trans_field_ptr(&bcx, dest_val, Disr::from(disr), i); let arg = 
&fn_ty.args[arg_idx]; arg_idx += 1; if common::type_is_fat_ptr(bcx.ccx, arg_ty) { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index e73bfd73e9b07..c99dc7ee5ef4b 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -26,7 +26,7 @@ use base; use builder::Builder; use common::{self, CrateContext, SharedCrateContext}; use cleanup::CleanupScope; -use adt::MaybeSizedValue; +use mir::lvalue::LvalueRef; use consts; use declare; use value::Value; @@ -364,7 +364,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. let self_scope = CleanupScope::schedule_drop_mem( - &bcx, MaybeSizedValue::sized(llenv), closure_ty + &bcx, LvalueRef::new_sized_ty(llenv, closure_ty) ); let llfn = callee.reify(bcx.ccx); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 8b296e9ad2988..d7ac5bee6d85a 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -20,12 +20,12 @@ use llvm::BasicBlockRef; use base; -use adt::MaybeSizedValue; +use mir::lvalue::LvalueRef; +use rustc::mir::tcx::LvalueTy; use builder::Builder; use common::Funclet; use glue; use type_::Type; -use rustc::ty::Ty; pub struct CleanupScope<'tcx> { // Cleanup to run upon scope exit. @@ -37,14 +37,13 @@ pub struct CleanupScope<'tcx> { #[derive(Copy, Clone)] pub struct DropValue<'tcx> { - val: MaybeSizedValue, - ty: Ty<'tcx>, + val: LvalueRef<'tcx>, skip_dtor: bool, } impl<'tcx> DropValue<'tcx> { fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) { - glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) + glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet) } /// Creates a landing pad for the top scope. 
The landing pad will perform all cleanups necessary @@ -96,12 +95,16 @@ impl<'tcx> DropValue<'tcx> { impl<'a, 'tcx> CleanupScope<'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` pub fn schedule_drop_mem( - bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx> ) -> CleanupScope<'tcx> { - if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + if let LvalueTy::Downcast { .. } = val.ty { + bug!("Cannot drop downcast ty yet"); + } + if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) { + return CleanupScope::noop(); + } let drop = DropValue { val: val, - ty: ty, skip_dtor: false, }; @@ -114,15 +117,19 @@ impl<'a, 'tcx> CleanupScope<'tcx> { /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. pub fn schedule_drop_adt_contents( - bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx> + bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx> ) -> CleanupScope<'tcx> { + if let LvalueTy::Downcast { .. } = val.ty { + bug!("Cannot drop downcast ty yet"); + } // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. 
- if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) { + return CleanupScope::noop(); + } let drop = DropValue { val: val, - ty: ty, skip_dtor: true, }; diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 8b7df0128b00b..f3977004b336d 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -22,7 +22,8 @@ use rustc::ty::subst::{Substs}; use rustc::traits; use rustc::ty::{self, AdtKind, Ty, TypeFoldable}; use rustc::ty::subst::Kind; -use adt::{self, MaybeSizedValue}; +use mir::lvalue::LvalueRef; +use adt; use base::*; use callee::Callee; use cleanup::CleanupScope; @@ -39,11 +40,8 @@ use builder::Builder; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_ty<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - ptr: MaybeSizedValue, - content_ty: Ty<'tcx> -) { +pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) { + let content_ty = ptr.ty.to_ty(bcx.tcx()); let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem); let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty))); let callee = Callee::def(bcx.ccx, def_id, substs); @@ -51,7 +49,7 @@ pub fn trans_exchange_free_ty<'a, 'tcx>( let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let llret = bcx.call(callee.reify(bcx.ccx), - &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize], None); + &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None); fn_ty.apply_attrs_callsite(llret); } @@ -94,17 +92,17 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t } } -fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) { - call_drop_glue(bcx, args, t, false, None) +fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) { + call_drop_glue(bcx, args, false, None) } pub fn call_drop_glue<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, - mut args: MaybeSizedValue, - t: Ty<'tcx>, + mut args: 
LvalueRef<'tcx>, skip_dtor: bool, funclet: Option<&'a Funclet>, ) { + let t = args.ty.to_ty(bcx.tcx()); // NB: v is an *alias* of type t here, not a direct value. debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); if bcx.ccx.shared().type_needs_drop(t) { @@ -117,11 +115,11 @@ pub fn call_drop_glue<'a, 'tcx>( let glue = get_drop_glue_core(ccx, g); let glue_type = get_drop_glue_type(ccx.shared(), t); if glue_type != t { - args.value = bcx.pointercast(args.value, type_of(ccx, glue_type).ptr_to()); + args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to()); } // No drop-hint ==> call standard drop glue - bcx.call(glue, &[args.value, args.meta][..1 + args.has_meta() as usize], + bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize], funclet.map(|b| b.bundle())); } } @@ -194,9 +192,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let value = get_param(llfn, 0); let ptr = if ccx.shared().type_is_sized(t) { - MaybeSizedValue::sized(value) + LvalueRef::new_sized_ty(value, t) } else { - MaybeSizedValue::unsized_(value, get_param(llfn, 1)) + LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t) }; let skip_dtor = match g { @@ -211,14 +209,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // a safe-guard, assert TyBox not used with TyContents. 
assert!(!skip_dtor); let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) { - let llbox = bcx.load(get_dataptr(&bcx, ptr.value)); - let info = bcx.load(get_meta(&bcx, ptr.value)); - MaybeSizedValue::unsized_(llbox, info) + let llbox = bcx.load(get_dataptr(&bcx, ptr.llval)); + let info = bcx.load(get_meta(&bcx, ptr.llval)); + LvalueRef::new_unsized_ty(llbox, info, content_ty) } else { - MaybeSizedValue::sized(bcx.load(ptr.value)) + LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty) }; - drop_ty(&bcx, ptr, content_ty); - trans_exchange_free_ty(&bcx, ptr, content_ty); + drop_ty(&bcx, ptr); + trans_exchange_free_ty(&bcx, ptr); bcx } ty::TyDynamic(..) => { @@ -226,8 +224,8 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // versus without calling Drop::drop. Assert caller is // okay with always calling the Drop impl, if any. assert!(!skip_dtor); - let dtor = bcx.load(ptr.meta); - bcx.call(dtor, &[ptr.value], None); + let dtor = bcx.load(ptr.llextra); + bcx.call(dtor, &[ptr.llval], None); bcx } ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { @@ -245,7 +243,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. 
let contents_scope = if !shallow_drop { - CleanupScope::schedule_drop_adt_contents(&bcx, ptr, t) + CleanupScope::schedule_drop_adt_contents(&bcx, ptr) } else { CleanupScope::noop() }; @@ -262,7 +260,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let llret; - let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; + let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]; if let Some(landing_pad) = contents_scope.landing_pad { let normal_bcx = bcx.build_new_block("normal-return"); llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None); @@ -279,7 +277,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi } _ => { if bcx.ccx.shared().type_needs_drop(t) { - drop_structural_ty(bcx, ptr, t) + drop_structural_ty(bcx, ptr) } else { bcx } @@ -396,60 +394,57 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf } // Iterates through the elements of a structural type, dropping them. 
-fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, - ptr: MaybeSizedValue, - t: Ty<'tcx>) - -> Builder<'a, 'tcx> { +fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> Builder<'a, 'tcx> { fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>, - t: Ty<'tcx>, - av: adt::MaybeSizedValue, + av: LvalueRef<'tcx>, variant: &'tcx ty::VariantDef, substs: &Substs<'tcx>) { let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); - drop_ty(&cx, MaybeSizedValue::sized(field_ptr), arg); + let field_ptr = adt::trans_field_ptr(&cx, av, Disr::from(variant.disr_val), i); + drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg)); } } let mut cx = cx; + let t = ptr.ty.to_ty(cx.tcx()); match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); - drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty); + let llupvar = adt::trans_field_ptr(&cx, ptr, Disr(0), i); + drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty)); } } ty::TyArray(_, n) => { - let base = get_dataptr(&cx, ptr.value); + let base = get_dataptr(&cx, ptr.llval); let len = C_uint(cx.ccx, n); let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(&cx, base, unit_ty, len, - |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); + |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty))); } ty::TySlice(_) | ty::TyStr => { let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta, - |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); + cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra, + |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty))); } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let 
llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); - drop_ty(&cx, MaybeSizedValue::sized(llfld_a), *arg); + let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr(0), i); + drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg)); } } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i); + let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr::from(discr), i); let ptr = if cx.ccx.shared().type_is_sized(field_ty) { - MaybeSizedValue::sized(llfld_a) + LvalueRef::new_sized_ty(llfld_a, field_ty) } else { - MaybeSizedValue::unsized_(llfld_a, ptr.meta) + LvalueRef::new_unsized_ty(llfld_a, ptr.llextra, field_ty) }; - drop_ty(&cx, ptr, field_ty); + drop_ty(&cx, ptr); } } AdtKind::Union => { @@ -461,16 +456,16 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, // NB: we must hit the discriminant first so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(&cx, t, ptr.value, false) { + match adt::trans_switch(&cx, t, ptr.llval, false) { (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); - iter_variant(&cx, t, ptr, &adt.variants[0], substs); + iter_variant(&cx, ptr, &adt.variants[0], substs); } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { let tcx = cx.tcx(); - drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize); + drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize)); // Create a fall-through basic block for the "else" case of // the switch instruction we're about to generate. 
Note that @@ -496,7 +491,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, let variant_cx = cx.build_new_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - iter_variant(&variant_cx, t, ptr, variant, substs); + iter_variant(&variant_cx, ptr, variant, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 41155935a6353..b80c707f37697 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -16,6 +16,7 @@ use llvm; use llvm::{ValueRef}; use abi::{Abi, FnType}; use adt; +use mir::lvalue::LvalueRef; use base::*; use common::*; use declare; @@ -549,10 +550,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // destructors, and the contents are SIMD // etc. assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = adt::MaybeSizedValue::sized(llarg); + let arg = LvalueRef::new_sized_ty(llarg, arg_type); (0..contents.len()) .map(|i| { - bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) + bcx.load(adt::trans_field_ptr(bcx, arg, Disr(0), i)) }) .collect() } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index ae0d847072a59..77ce17006baa0 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -14,7 +14,7 @@ use rustc::middle::lang_items; use rustc::ty::{self, layout}; use rustc::mir; use abi::{Abi, FnType, ArgType}; -use adt::{self, MaybeSizedValue}; +use adt; use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use builder::Builder; @@ -37,7 +37,7 @@ use std::cmp; use super::{MirContext, LocalRef}; use super::analyze::CleanupKind; use super::constant::Const; -use super::lvalue::{LvalueRef}; +use super::lvalue::LvalueRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, 
Ref, Immediate}; @@ -251,11 +251,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { lvalue.llval }; - MaybeSizedValue::sized(value) + LvalueRef::new_sized_ty(value, ty) } else { - MaybeSizedValue::unsized_(lvalue.llval, lvalue.llextra) + LvalueRef::new_unsized_ty(lvalue.llval, lvalue.llextra, ty) }; - let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; + let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]; if let Some(unwind) = unwind { bcx.invoke( drop_fn, @@ -707,9 +707,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval) => { - let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); + let ptr = adt::trans_field_ptr( + bcx, LvalueRef::new_sized_ty(llval, tuple.ty), Disr(0), n + ); let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5a926c7f75559..fe8f92c679801 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -44,6 +44,18 @@ impl<'tcx> LvalueRef<'tcx> { LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } + pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef::new_sized(llval, LvalueTy::from_ty(ty)) + } + + pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef { + llval: llval, + llextra: llextra, + ty: LvalueTy::from_ty(ty), + } + } + pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { @@ -55,6 +67,10 @@ impl<'tcx> LvalueRef<'tcx> { _ => bug!("unexpected type `{}` in LvalueRef::len", ty) } } + + pub fn has_extra(&self) -> bool { + !self.llextra.is_null() + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { @@ 
-132,11 +148,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let discr = discr as u64; let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { - adt::MaybeSizedValue::sized(tr_base.llval) + LvalueRef::new_sized_ty(tr_base.llval, base_ty) } else { - adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) + LvalueRef::new_unsized_ty(tr_base.llval, tr_base.llextra, base_ty) }; - let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr), + let llprojected = adt::trans_field_ptr(bcx, base, Disr(discr), field.index()); let llextra = if is_sized { ptr::null_mut() diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 9e2c7797b5352..01fd7979be116 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -38,7 +38,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx}; pub use self::constant::trans_static_initializer; use self::analyze::CleanupKind; -use self::lvalue::{LvalueRef}; +use self::lvalue::LvalueRef; use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; @@ -578,7 +578,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mod analyze; mod block; mod constant; -mod lvalue; +pub mod lvalue; mod operand; mod rvalue; mod statement; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index a23c3d4b2e358..9a3d63f18e8b4 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -110,10 +110,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
if !common::type_is_zero_size(bcx.ccx, op.ty) { - let val = adt::MaybeSizedValue::sized(dest.llval); + let val = LvalueRef::new_sized_ty(dest.llval, dest_ty); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, - field_index); + let lldest_i = adt::trans_field_ptr(&bcx, val, disr, field_index); self.store_operand(&bcx, lldest_i, op, None); } } From ea0ebe41c7dc04374ec0e7e1173bb32b374d02fb Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 09:06:01 -0700 Subject: [PATCH 07/18] Change trans_field_ptr to utilize LvalueTy to determine discriminant. --- src/librustc_trans/adt.rs | 25 +++++++++++---------- src/librustc_trans/base.rs | 14 ++++++++---- src/librustc_trans/glue.rs | 37 ++++++++++++++++++++++---------- src/librustc_trans/intrinsic.rs | 3 +-- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 28 ++++++++++-------------- src/librustc_trans/mir/rvalue.rs | 12 ++++++++--- 7 files changed, 72 insertions(+), 49 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 14882dd433ba7..a541306f99a12 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -48,6 +48,7 @@ use std; use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; +use rustc::mir::tcx::LvalueTy; use mir::lvalue::LvalueRef; use common::*; use builder::Builder; @@ -476,31 +477,33 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { pub fn trans_field_ptr<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>, - discr: Disr, - ix: usize + ix: usize, ) -> ValueRef { + let discr = match val.ty { + LvalueTy::Ty { .. } => 0, + LvalueTy::Downcast { variant_index, .. 
} => variant_index, + }; let t = val.ty.to_ty(bcx.tcx()); let l = bcx.ccx.layout_of(t); - debug!("trans_field_ptr on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. match *l { layout::Univariant { ref variant, .. } => { - assert_eq!(discr, Disr(0)); + assert_eq!(discr, 0); struct_field_ptr(bcx, &variant, &compute_fields(bcx.ccx, t, 0, false), val, ix, false) } layout::Vector { count, .. } => { - assert_eq!(discr.0, 0); + assert_eq!(discr, 0); assert!((ix as u64) < count); bcx.struct_gep(val.llval, ix) } layout::General { discr: d, ref variants, .. } => { - let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); + let mut fields = compute_fields(bcx.ccx, t, discr, false); fields.insert(0, d.to_ty(&bcx.tcx(), false)); - struct_field_ptr(bcx, &variants[discr.0 as usize], + struct_field_ptr(bcx, &variants[discr], &fields, val, ix + 1, true) } @@ -510,7 +513,7 @@ pub fn trans_field_ptr<'a, 'tcx>( bcx.pointercast(val.llval, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { + layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) @@ -521,14 +524,14 @@ pub fn trans_field_ptr<'a, 'tcx>( layout::RawNullablePointer { nndiscr, .. } => { let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); - assert_eq!(discr.0, nndiscr); + assert_eq!(discr as u64, nndiscr); let ty = type_of::type_of(bcx.ccx, nnty); bcx.pointercast(val.llval, ty.ptr_to()) } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { - assert_eq!(discr.0, nndiscr); + assert_eq!(discr as u64, nndiscr); struct_field_ptr(bcx, &nonnull, - &compute_fields(bcx.ccx, t, discr.0 as usize, false), + &compute_fields(bcx.ccx, t, discr, false), val, ix, false) } _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index a329b01cc1608..5814064767639 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -37,6 +37,7 @@ use llvm; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use middle::lang_items::StartFnLangItem; use rustc::ty::subst::Substs; +use rustc::mir::tcx::LvalueTy; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::adjustment::CustomCoerceUnsized; @@ -288,8 +289,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, continue; } - let src_f = adt::trans_field_ptr(bcx, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, dst, Disr(0), i); + let src_f = adt::trans_field_ptr(bcx, src, i); + let dst_f = adt::trans_field_ptr(bcx, dst, i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty, None); } else { @@ -622,11 +623,16 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") }; // Can return unsized value - let dest_val = LvalueRef::new_sized_ty(dest, sig.output()); + let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output()); + dest_val.ty = LvalueTy::Downcast { + adt_def: sig.output().ty_adt_def().unwrap(), + substs: substs, + variant_index: disr.0 as usize, + }; let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(&bcx, dest_val, Disr::from(disr), i); + let lldestptr = adt::trans_field_ptr(&bcx, dest_val, i); let arg = &fn_ty.args[arg_idx]; arg_idx += 1; if common::type_is_fat_ptr(bcx.ccx, arg_ty) { diff --git a/src/librustc_trans/glue.rs 
b/src/librustc_trans/glue.rs index f3977004b336d..0b186d4938ba7 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -20,8 +20,9 @@ use llvm::{ValueRef, get_param}; use middle::lang_items::BoxFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, AdtKind, Ty, TypeFoldable}; +use rustc::ty::{self, AdtDef, AdtKind, Ty, TypeFoldable}; use rustc::ty::subst::Kind; +use rustc::mir::tcx::LvalueTy; use mir::lvalue::LvalueRef; use adt; use base::*; @@ -395,14 +396,22 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Iterates through the elements of a structural type, dropping them. fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> Builder<'a, 'tcx> { - fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>, + fn iter_variant<'a, 'tcx>(cx: &'a Builder<'a, 'tcx>, av: LvalueRef<'tcx>, - variant: &'tcx ty::VariantDef, - substs: &Substs<'tcx>) { + adt_def: &'tcx AdtDef, + variant_index: usize, + substs: &'tcx Substs<'tcx>) { + let variant = &adt_def.variants[variant_index]; let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - let field_ptr = adt::trans_field_ptr(&cx, av, Disr::from(variant.disr_val), i); + let mut av = av.clone(); + av.ty = LvalueTy::Downcast { + adt_def: adt_def, + substs: substs, + variant_index: variant_index, + }; + let field_ptr = adt::trans_field_ptr(&cx, av, i); drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg)); } } @@ -412,7 +421,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(&cx, ptr, Disr(0), i); + let llupvar = adt::trans_field_ptr(&cx, ptr, i); drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty)); } } @@ -430,7 +439,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: 
Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr(0), i); + let llfld_a = adt::trans_field_ptr(&cx, ptr, i); drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg)); } } @@ -438,7 +447,13 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> AdtKind::Struct => { let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr::from(discr), i); + let mut ptr = ptr.clone(); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: Disr::from(discr).0 as usize, + }; + let llfld_a = adt::trans_field_ptr(&cx, ptr, i); let ptr = if cx.ccx.shared().type_is_sized(field_ty) { LvalueRef::new_sized_ty(llfld_a, field_ty) } else { @@ -460,7 +475,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); - iter_variant(&cx, ptr, &adt.variants[0], substs); + iter_variant(&cx, ptr, &adt, 0, substs); } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { @@ -485,13 +500,13 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); let next_cx = cx.build_new_block("enum-iter-next"); - for variant in &adt.variants { + for (i, variant) in adt.variants.iter().enumerate() { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); let variant_cx = cx.build_new_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - iter_variant(&variant_cx, ptr, variant, substs); + iter_variant(&variant_cx, ptr, &adt, i, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; diff --git 
a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index b80c707f37697..9a06a242aa757 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -25,7 +25,6 @@ use type_of; use machine; use type_::Type; use rustc::ty::{self, Ty}; -use Disr; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -553,7 +552,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let arg = LvalueRef::new_sized_ty(llarg, arg_type); (0..contents.len()) .map(|i| { - bcx.load(adt::trans_field_ptr(bcx, arg, Disr(0), i)) + bcx.load(adt::trans_field_ptr(bcx, arg, i)) }) .collect() } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 77ce17006baa0..ccaa4140122bd 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -709,7 +709,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Ref(llval) => { for (n, &ty) in arg_types.iter().enumerate() { let ptr = adt::trans_field_ptr( - bcx, LvalueRef::new_sized_ty(llval, tuple.ty), Disr(0), n + bcx, LvalueRef::new_sized_ty(llval, tuple.ty), n ); let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index fe8f92c679801..5f97dfa22cd54 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -20,7 +20,6 @@ use consts; use machine; use type_of::type_of; use type_of; -use Disr; use std::ptr; @@ -48,6 +47,13 @@ impl<'tcx> LvalueRef<'tcx> { LvalueRef::new_sized(llval, LvalueTy::from_ty(ty)) } + pub fn new_unsized(llval: ValueRef, llextra: ValueRef, ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { + LvalueRef { + llval: llval, + llextra: llextra, + ty: ty, + } + } pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { LvalueRef { llval: llval, @@ -140,26 +146,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (llprojected, llextra) = match 
projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let base_ty = tr_base.ty.to_ty(tcx); - let discr = match tr_base.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, - }; - let discr = discr as u64; let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { - LvalueRef::new_sized_ty(tr_base.llval, base_ty) - } else { - LvalueRef::new_unsized_ty(tr_base.llval, tr_base.llextra, base_ty) - }; - let llprojected = adt::trans_field_ptr(bcx, base, Disr(discr), - field.index()); - let llextra = if is_sized { - ptr::null_mut() + LvalueRef::new_sized(tr_base.llval, tr_base.ty) } else { - tr_base.llextra + LvalueRef::new_unsized(tr_base.llval, tr_base.llextra, tr_base.ty) }; - (llprojected, llextra) + let llprojected = adt::trans_field_ptr(bcx, base, field.index()); + (llprojected, base.llextra) } mir::ProjectionElem::Index(ref index) => { let index = self.trans_operand(bcx, index); diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 9a3d63f18e8b4..cc468ac18fdd9 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -12,6 +12,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::Layout; +use rustc::mir::tcx::LvalueTy; use rustc::mir; use middle::lang_items::ExchangeMallocFnLangItem; @@ -102,7 +103,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { - mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); let dest_ty = dest.ty.to_ty(bcx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); @@ -110,9 +111,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op = 
self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. if !common::type_is_zero_size(bcx.ccx, op.ty) { - let val = LvalueRef::new_sized_ty(dest.llval, dest_ty); + let mut val = LvalueRef::new_sized(dest.llval, dest.ty); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr(&bcx, val, disr, field_index); + val.ty = LvalueTy::Downcast { + adt_def: adt_def, + substs: self.monomorphize(&substs), + variant_index: disr.0 as usize, + }; + let lldest_i = adt::trans_field_ptr(&bcx, val, field_index); self.store_operand(&bcx, lldest_i, op, None); } } From 982b8f4f49588ffe47475321a0d59c2beef1c9be Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 11:00:42 -0700 Subject: [PATCH 08/18] Move trans_const to mir::constant --- src/librustc_trans/adt.rs | 154 +------------------------- src/librustc_trans/mir/constant.rs | 171 +++++++++++++++++++++++++++-- 2 files changed, 164 insertions(+), 161 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index a541306f99a12..04634607c2c50 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -70,9 +70,9 @@ pub enum BranchKind { /// Treats closures as a struct with one variant. /// `empty_if_no_variants` is a switch to deal with empty enums. /// If true, `variant_index` is disregarded and an empty Vec returned in this case. -fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - variant_index: usize, - empty_if_no_variants: bool) -> Vec> { +pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + variant_index: usize, + empty_if_no_variants: bool) -> Vec> { match t.sty { ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { Vec::default() @@ -412,9 +412,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) - /// Set the discriminant for a new value of the given case of the given /// representation. 
-pub fn trans_set_discr<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr -) { +pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) { let l = bcx.ccx.layout_of(t); match *l { layout::CEnum{ discr, min, max, .. } => { @@ -465,7 +463,7 @@ fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } -fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { +pub fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { if min <= max { assert!(min <= discr && discr <= max) } else { @@ -630,148 +628,6 @@ fn struct_field_ptr<'a, 'tcx>( bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } -/// Construct a constant value, suitable for initializing a -/// GlobalVariable, given a case and constant values for its fields. -/// Note that this may have a different LLVM type (and different -/// alignment!) from the representation's `type_of`, so it needs a -/// pointer cast before use. -/// -/// The LLVM type system does not directly support unions, and only -/// pointers can be bitcast, so a constant (and, by extension, the -/// GlobalVariable initialized by it) will have a type that can vary -/// depending on which case of an enum it is. -/// -/// To understand the alignment situation, consider `enum E { V64(u64), -/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to -/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, -/// i32, i32}`, which is 4-byte aligned. -/// -/// Currently the returned value has the same size as the type, but -/// this could be changed in the future to avoid allocating unnecessary -/// space after values of shorter-than-maximum cases. 
-pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr, - vals: &[ValueRef]) -> ValueRef { - let l = ccx.layout_of(t); - let dl = &ccx.tcx().data_layout; - match *l { - layout::CEnum { discr: d, min, max, .. } => { - assert_eq!(vals.len(), 0); - assert_discr_in_range(Disr(min), Disr(max), discr); - C_integral(Type::from_integer(ccx, d), discr.0, true) - } - layout::General { discr: d, ref variants, .. } => { - let variant = &variants[discr.0 as usize]; - let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true); - let mut vals_with_discr = vec![lldiscr]; - vals_with_discr.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); - if needed_padding > 0 { - contents.push(padding(ccx, needed_padding)); - } - C_struct(ccx, &contents[..], false) - } - layout::UntaggedUnion { ref variants, .. }=> { - assert_eq!(discr, Disr(0)); - let contents = build_const_union(ccx, variants, vals[0]); - C_struct(ccx, &contents, variants.packed) - } - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, Disr(0)); - let contents = build_const_struct(ccx, &variant, vals); - C_struct(ccx, &contents[..], variant.packed) - } - layout::Vector { .. } => { - C_vector(vals) - } - layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0]; - if discr.0 == nndiscr { - assert_eq!(vals.len(), 1); - vals[0] - } else { - C_null(type_of::sizing_type_of(ccx, nnty)) - } - } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if discr.0 == nndiscr { - C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) - } else { - let fields = compute_fields(ccx, t, nndiscr as usize, false); - let vals = fields.iter().map(|&ty| { - // Always use null even if it's not the `discrfield`th - // field; see #8506. 
- C_null(type_of::sizing_type_of(ccx, ty)) - }).collect::>(); - C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) - } - } - _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) - } -} - -/// Building structs is a little complicated, because we might need to -/// insert padding if a field's value is less aligned than its type. -/// -/// Continuing the example from `trans_const`, a value of type `(u32, -/// E)` should have the `E` at offset 8, but if that field's -/// initializer is 4-byte aligned then simply translating the tuple as -/// a two-element struct will locate it at offset 4, and accesses to it -/// will read the wrong memory. -fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &layout::Struct, - vals: &[ValueRef]) - -> Vec { - assert_eq!(vals.len(), st.offsets.len()); - - if vals.len() == 0 { - return Vec::new(); - } - - // offset of current value - let mut offset = 0; - let mut cfields = Vec::new(); - cfields.reserve(st.offsets.len()*2); - - let parts = st.field_index_by_increasing_offset().map(|i| { - (&vals[i], st.offsets[i].bytes()) - }); - for (&val, target_offset) in parts { - if offset < target_offset { - cfields.push(padding(ccx, target_offset - offset)); - offset = target_offset; - } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); - } - - if offset < st.stride().bytes() { - cfields.push(padding(ccx, st.stride().bytes() - offset)); - } - - cfields -} - -fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - un: &layout::Union, - field_val: ValueRef) - -> Vec { - let mut cfields = vec![field_val]; - - let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); - let size = un.stride().bytes(); - if offset != size { - cfields.push(padding(ccx, size - offset)); - } - - cfields -} - -fn padding(ccx: &CrateContext, size: u64) -> ValueRef { - C_undef(Type::array(&Type::i8(ccx), size)) -} - // FIXME this utility routine should be 
somewhere more general #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index e15c25df91139..13e659a5ae0e8 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,7 +18,7 @@ use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir; use rustc::mir::tcx::LvalueTy; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; @@ -27,7 +27,7 @@ use callee::Callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; use common::const_to_opt_u128; use consts; use monomorphize::{self, Instance}; @@ -549,16 +549,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::AggregateKind::Adt(..) | mir::AggregateKind::Closure(..) | mir::AggregateKind::Tuple => { - let disr = match *kind { - mir::AggregateKind::Adt(adt_def, index, _, _) => { - Disr::from(adt_def.variants[index].disr_val) - } - _ => Disr(0) - }; - Const::new( - adt::trans_const(self.ccx, dest_ty, disr, &fields), - dest_ty - ) + Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) } } } @@ -946,3 +937,159 @@ pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) let instance = Instance::mono(ccx.shared(), def_id); MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) } + +/// Construct a constant value, suitable for initializing a +/// GlobalVariable, given a case and constant values for its fields. 
+/// Note that this may have a different LLVM type (and different +/// alignment!) from the representation's `type_of`, so it needs a +/// pointer cast before use. +/// +/// The LLVM type system does not directly support unions, and only +/// pointers can be bitcast, so a constant (and, by extension, the +/// GlobalVariable initialized by it) will have a type that can vary +/// depending on which case of an enum it is. +/// +/// To understand the alignment situation, consider `enum E { V64(u64), +/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to +/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, +/// i32, i32}`, which is 4-byte aligned. +/// +/// Currently the returned value has the same size as the type, but +/// this could be changed in the future to avoid allocating unnecessary +/// space after values of shorter-than-maximum cases. +fn trans_const<'a, 'tcx>( + ccx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + kind: &mir::AggregateKind, + vals: &[ValueRef] +) -> ValueRef { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + let variant_index = match *kind { + mir::AggregateKind::Adt(_, index, _, _) => index, + _ => 0, + }; + match *l { + layout::CEnum { discr: d, min, max, .. } => { + let discr = match *kind { + mir::AggregateKind::Adt(adt_def, _, _, _) => { + Disr::from(adt_def.variants[variant_index].disr_val) + }, + _ => Disr(0), + }; + assert_eq!(vals.len(), 0); + adt::assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) + } + layout::General { discr: d, ref variants, .. 
} => { + let variant = &variants[variant_index]; + let lldiscr = C_integral(Type::from_integer(ccx, d), variant_index as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); + let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); + if needed_padding > 0 { + contents.push(padding(ccx, needed_padding)); + } + C_struct(ccx, &contents[..], false) + } + layout::UntaggedUnion { ref variants, .. }=> { + assert_eq!(variant_index, 0); + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) + } + layout::Univariant { ref variant, .. } => { + assert_eq!(variant_index, 0); + let contents = build_const_struct(ccx, &variant, vals); + C_struct(ccx, &contents[..], variant.packed) + } + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0]; + if variant_index as u64 == nndiscr { + assert_eq!(vals.len(), 1); + vals[0] + } else { + C_null(type_of::sizing_type_of(ccx, nnty)) + } + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + if variant_index as u64 == nndiscr { + C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + } else { + let fields = adt::compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { + // Always use null even if it's not the `discrfield`th + // field; see #8506. + C_null(type_of::sizing_type_of(ccx, ty)) + }).collect::>(); + C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) + } + } + _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) + } +} + +/// Building structs is a little complicated, because we might need to +/// insert padding if a field's value is less aligned than its type. 
+/// +/// Continuing the example from `trans_const`, a value of type `(u32, +/// E)` should have the `E` at offset 8, but if that field's +/// initializer is 4-byte aligned then simply translating the tuple as +/// a two-element struct will locate it at offset 4, and accesses to it +/// will read the wrong memory. +fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + st: &layout::Struct, + vals: &[ValueRef]) + -> Vec { + assert_eq!(vals.len(), st.offsets.len()); + + if vals.len() == 0 { + return Vec::new(); + } + + // offset of current value + let mut offset = 0; + let mut cfields = Vec::new(); + cfields.reserve(st.offsets.len()*2); + + let parts = st.field_index_by_increasing_offset().map(|i| { + (&vals[i], st.offsets[i].bytes()) + }); + for (&val, target_offset) in parts { + if offset < target_offset { + cfields.push(padding(ccx, target_offset - offset)); + offset = target_offset; + } + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + } + + if offset < st.stride().bytes() { + cfields.push(padding(ccx, st.stride().bytes() - offset)); + } + + cfields +} + +fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + un: &layout::Union, + field_val: ValueRef) + -> Vec { + let mut cfields = vec![field_val]; + + let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); + let size = un.stride().bytes(); + if offset != size { + cfields.push(padding(ccx, size - offset)); + } + + cfields +} + +fn padding(ccx: &CrateContext, size: u64) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size)) +} From 426c558c5a7d8c9c8b0382e64e64b2647ecdd057 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 12:13:59 -0700 Subject: [PATCH 09/18] Move trans_field_ptr and struct_field_ptr to mir/lvalue --- src/librustc_trans/adt.rs | 161 ------------------------------- src/librustc_trans/base.rs | 6 +- src/librustc_trans/glue.rs | 8 +- src/librustc_trans/intrinsic.rs | 6 +- 
src/librustc_trans/mir/block.rs | 5 +- src/librustc_trans/mir/lvalue.rs | 160 +++++++++++++++++++++++++++++- src/librustc_trans/mir/rvalue.rs | 2 +- 7 files changed, 167 insertions(+), 181 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 04634607c2c50..28934407c17cc 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -48,17 +48,13 @@ use std; use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; -use rustc::mir::tcx::LvalueTy; -use mir::lvalue::LvalueRef; use common::*; use builder::Builder; -use glue; use base; use machine; use monomorphize; use type_::Type; use type_of; -use value::Value; #[derive(Copy, Clone, PartialEq)] pub enum BranchKind { @@ -471,163 +467,6 @@ pub fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { } } -/// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - val: LvalueRef<'tcx>, - ix: usize, -) -> ValueRef { - let discr = match val.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { variant_index, .. } => variant_index, - }; - let t = val.ty.to_ty(bcx.tcx()); - let l = bcx.ccx.layout_of(t); - // Note: if this ever needs to generate conditionals (e.g., if we - // decide to do some kind of cdr-coding-like non-unique repr - // someday), it will need to return a possibly-new bcx as well. - match *l { - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, 0); - struct_field_ptr(bcx, &variant, - &compute_fields(bcx.ccx, t, 0, false), - val, ix, false) - } - layout::Vector { count, .. } => { - assert_eq!(discr, 0); - assert!((ix as u64) < count); - bcx.struct_gep(val.llval, ix) - } - layout::General { discr: d, ref variants, .. } => { - let mut fields = compute_fields(bcx.ccx, t, discr, false); - fields.insert(0, d.to_ty(&bcx.tcx(), false)); - struct_field_ptr(bcx, &variants[discr], - &fields, - val, ix + 1, true) - } - layout::UntaggedUnion { .. 
} => { - let fields = compute_fields(bcx.ccx, t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); - bcx.pointercast(val.llval, ty.ptr_to()) - } - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { - let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx, nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); - bcx.pointercast(val.llval, ty.ptr_to()) - } - layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; - assert_eq!(ix, 0); - assert_eq!(discr as u64, nndiscr); - let ty = type_of::type_of(bcx.ccx, nnty); - bcx.pointercast(val.llval, ty.ptr_to()) - } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr as u64, nndiscr); - struct_field_ptr(bcx, &nonnull, - &compute_fields(bcx.ccx, t, discr, false), - val, ix, false) - } - _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) - } -} - -fn struct_field_ptr<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - st: &layout::Struct, - fields: &Vec>, - val: LvalueRef, - ix: usize, - needs_cast: bool -) -> ValueRef { - let fty = fields[ix]; - let ccx = bcx.ccx; - - let ptr_val = if needs_cast { - let fields = st.field_index_by_increasing_offset().map(|i| { - type_of::in_memory_type_of(ccx, fields[i]) - }).collect::>(); - let real_ty = Type::struct_(ccx, &fields[..], st.packed); - bcx.pointercast(val.llval, real_ty.ptr_to()) - } else { - val.llval - }; - - // Simple case - we can just GEP the field - // * First field - Always aligned properly - // * Packed struct - There is no alignment padding - // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || 
st.packed || - bcx.ccx.shared().type_is_sized(fty) { - return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); - } - - // If the type of the last field is [T] or str, then we don't need to do - // any adjusments - match fty.sty { - ty::TySlice(..) | ty::TyStr => { - return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); - } - _ => () - } - - // There's no metadata available, log the case and just do the GEP. - if !val.has_extra() { - debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", - ix, Value(ptr_val)); - return bcx.struct_gep(ptr_val, ix); - } - - // We need to get the pointer manually now. - // We do this by casting to a *i8, then offsetting it by the appropriate amount. - // We do this instead of, say, simply adjusting the pointer from the result of a GEP - // because the field may have an arbitrary alignment in the LLVM representation - // anyway. - // - // To demonstrate: - // struct Foo { - // x: u16, - // y: T - // } - // - // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that - // the `y` field has 16-bit alignment. 
- - let meta = val.llextra; - - - let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_uint(bcx.ccx, offset); - - // Get the alignment of the field - let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); - - // Bump the unaligned offset up to the appropriate alignment using the - // following expression: - // - // (unaligned offset + (align - 1)) & -align - - // Calculate offset - let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); - let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), - bcx.neg(align)); - - debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); - - // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); - let byte_ptr = bcx.gep(byte_ptr, &[offset]); - - // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); - debug!("struct_field_ptr: Field type is {:?}", ll_fty); - bcx.pointercast(byte_ptr, ll_fty.ptr_to()) -} - // FIXME this utility routine should be somewhere more general #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 5814064767639..8b182dabf5101 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -289,8 +289,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, continue; } - let src_f = adt::trans_field_ptr(bcx, src, i); - let dst_f = adt::trans_field_ptr(bcx, dst, i); + let src_f = src.trans_field_ptr(bcx, i); + let dst_f = dst.trans_field_ptr(bcx, i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty, None); } else { @@ -632,7 +632,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(&bcx, dest_val, i); + let lldestptr = dest_val.trans_field_ptr(&bcx, i); let arg = 
&fn_ty.args[arg_idx]; arg_idx += 1; if common::type_is_fat_ptr(bcx.ccx, arg_ty) { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 0b186d4938ba7..f25864c8f28a4 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -411,7 +411,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> substs: substs, variant_index: variant_index, }; - let field_ptr = adt::trans_field_ptr(&cx, av, i); + let field_ptr = av.trans_field_ptr(&cx, i); drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg)); } } @@ -421,7 +421,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(&cx, ptr, i); + let llupvar = ptr.trans_field_ptr(&cx, i); drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty)); } } @@ -439,7 +439,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, ptr, i); + let llfld_a = ptr.trans_field_ptr(&cx, i); drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg)); } } @@ -453,7 +453,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> substs: substs, variant_index: Disr::from(discr).0 as usize, }; - let llfld_a = adt::trans_field_ptr(&cx, ptr, i); + let llfld_a = ptr.trans_field_ptr(&cx, i); let ptr = if cx.ccx.shared().type_is_sized(field_ty) { LvalueRef::new_sized_ty(llfld_a, field_ty) } else { diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 9a06a242aa757..35895e7827e1a 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -550,11 +550,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // etc. 
assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); let arg = LvalueRef::new_sized_ty(llarg, arg_type); - (0..contents.len()) - .map(|i| { - bcx.load(adt::trans_field_ptr(bcx, arg, i)) - }) - .collect() + (0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index ccaa4140122bd..a62c25f2afc45 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -708,9 +708,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match tuple.val { Ref(llval) => { for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr( - bcx, LvalueRef::new_sized_ty(llval, tuple.ty), n - ); + let ptr = LvalueRef::new_sized_ty(llval, tuple.ty); + let ptr = ptr.trans_field_ptr(bcx, n); let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5f97dfa22cd54..a538a16dc95cb 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use llvm::ValueRef; -use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TypeFoldable}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -20,6 +20,9 @@ use consts; use machine; use type_of::type_of; use type_of; +use type_::Type; +use value::Value; +use glue; use std::ptr; @@ -38,7 +41,7 @@ pub struct LvalueRef<'tcx> { pub ty: LvalueTy<'tcx>, } -impl<'tcx> LvalueRef<'tcx> { +impl<'a, 'tcx> LvalueRef<'tcx> { pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } @@ -62,7 +65,7 @@ impl<'tcx> LvalueRef<'tcx> { } } - pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { ty::TyArray(_, n) => common::C_uint(ccx, n), @@ -77,6 +80,155 @@ impl<'tcx> LvalueRef<'tcx> { pub fn has_extra(&self) -> bool { !self.llextra.is_null() } + + pub fn struct_field_ptr( + self, + bcx: &Builder<'a, 'tcx>, + st: &layout::Struct, + fields: &Vec>, + ix: usize, + needs_cast: bool + ) -> ValueRef { + let fty = fields[ix]; + let ccx = bcx.ccx; + + let ptr_val = if needs_cast { + let fields = st.field_index_by_increasing_offset().map(|i| { + type_of::in_memory_type_of(ccx, fields[i]) + }).collect::>(); + let real_ty = Type::struct_(ccx, &fields[..], st.packed); + bcx.pointercast(self.llval, real_ty.ptr_to()) + } else { + self.llval + }; + + // Simple case - we can just GEP the field + // * First field - Always aligned properly + // * Packed struct - There is no alignment padding + // * Field is sized - pointer is properly aligned already + if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || + bcx.ccx.shared().type_is_sized(fty) { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + + // If the type of the last field is [T] or str, then we don't need to do + // any adjusments + 
match fty.sty { + ty::TySlice(..) | ty::TyStr => { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + _ => () + } + + // There's no metadata available, log the case and just do the GEP. + if !self.has_extra() { + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, Value(ptr_val)); + return bcx.struct_gep(ptr_val, ix); + } + + // We need to get the pointer manually now. + // We do this by casting to a *i8, then offsetting it by the appropriate amount. + // We do this instead of, say, simply adjusting the pointer from the result of a GEP + // because the field may have an arbitrary alignment in the LLVM representation + // anyway. + // + // To demonstrate: + // struct Foo { + // x: u16, + // y: T + // } + // + // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that + // the `y` field has 16-bit alignment. + + let meta = self.llextra; + + + let offset = st.offsets[ix].bytes(); + let unaligned_offset = C_uint(bcx.ccx, offset); + + // Get the alignment of the field + let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + + // Bump the unaligned offset up to the appropriate alignment using the + // following expression: + // + // (unaligned offset + (align - 1)) & -align + + // Calculate offset + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); + let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), + bcx.neg(align)); + + debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); + + // Cast and adjust pointer + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.gep(byte_ptr, &[offset]); + + // Finally, cast back to the type expected + let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + bcx.pointercast(byte_ptr, ll_fty.ptr_to()) + } + + /// Access a field, at a point when the value's case is known. 
+ pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef { + let discr = match self.ty { + LvalueTy::Ty { .. } => 0, + LvalueTy::Downcast { variant_index, .. } => variant_index, + }; + let t = self.ty.to_ty(bcx.tcx()); + let l = bcx.ccx.layout_of(t); + // Note: if this ever needs to generate conditionals (e.g., if we + // decide to do some kind of cdr-coding-like non-unique repr + // someday), it will need to return a possibly-new bcx as well. + match *l { + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, 0); + self.struct_field_ptr(bcx, &variant, + &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) + } + layout::Vector { count, .. } => { + assert_eq!(discr, 0); + assert!((ix as u64) < count); + bcx.struct_gep(self.llval, ix) + } + layout::General { discr: d, ref variants, .. } => { + let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); + fields.insert(0, d.to_ty(&bcx.tcx(), false)); + self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) + } + layout::UntaggedUnion { .. } => { + let fields = adt::compute_fields(bcx.ccx, t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { + let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); + // The unit-like case might have a nonzero number of unit-like fields. + // (e.d., Result of Either with (), as one side.) + let ty = type_of::type_of(bcx.ccx, nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. 
} => { + let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; + assert_eq!(ix, 0); + assert_eq!(discr as u64, nndiscr); + let ty = type_of::type_of(bcx.ccx, nnty); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + assert_eq!(discr as u64, nndiscr); + self.struct_field_ptr(bcx, &nonnull, + &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) + } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) + } + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { @@ -152,7 +304,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { LvalueRef::new_unsized(tr_base.llval, tr_base.llextra, tr_base.ty) }; - let llprojected = adt::trans_field_ptr(bcx, base, field.index()); + let llprojected = base.trans_field_ptr(bcx, field.index()); (llprojected, base.llextra) } mir::ProjectionElem::Index(ref index) => { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index cc468ac18fdd9..67fb8cf576d62 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -118,7 +118,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { substs: self.monomorphize(&substs), variant_index: disr.0 as usize, }; - let lldest_i = adt::trans_field_ptr(&bcx, val, field_index); + let lldest_i = val.trans_field_ptr(&bcx, field_index); self.store_operand(&bcx, lldest_i, op, None); } } From 81e8137b0dc436aad7fda9bffc6b2c2da67a353a Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 13:05:42 -0700 Subject: [PATCH 10/18] Inline trans_switch to simplify code --- src/librustc_trans/adt.rs | 28 ---------------------------- src/librustc_trans/glue.rs | 18 +++++++++++++----- 2 files changed, 13 insertions(+), 33 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 28934407c17cc..c3b9a56ac9778 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -56,12 +56,6 @@ use monomorphize; use 
type_::Type; use type_of; -#[derive(Copy, Clone, PartialEq)] -pub enum BranchKind { - Switch, - Single -} - /// Given an enum, struct, closure, or tuple, extracts fields. /// Treats closures as a struct with one variant. /// `empty_if_no_variants` is a switch to deal with empty enums. @@ -273,28 +267,6 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> } } -/// Obtain a representation of the discriminant sufficient to translate -/// destructuring; this may or may not involve the actual discriminant. -pub fn trans_switch<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - t: Ty<'tcx>, - scrutinee: ValueRef, - range_assert: bool -) -> (BranchKind, Option) { - let l = bcx.ccx.layout_of(t); - match *l { - layout::CEnum { .. } | layout::General { .. } | - layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { - (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert))) - } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => { - // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). - (BranchKind::Single, None) - }, - _ => bug!("{} is not an enum.", t) - } -} - pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { match *l { layout::CEnum { signed, .. 
}=> signed, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index f25864c8f28a4..0c21573b9c43b 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -20,7 +20,7 @@ use llvm::{ValueRef, get_param}; use middle::lang_items::BoxFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, AdtDef, AdtKind, Ty, TypeFoldable}; +use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable}; use rustc::ty::subst::Kind; use rustc::mir::tcx::LvalueTy; use mir::lvalue::LvalueRef; @@ -471,14 +471,22 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> // NB: we must hit the discriminant first so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(&cx, t, ptr.llval, false) { - (adt::BranchKind::Single, None) => { + // Obtain a representation of the discriminant sufficient to translate + // destructuring; this may or may not involve the actual discriminant. + let l = cx.ccx.layout_of(t); + match *l { + layout::Univariant { .. } | + layout::UntaggedUnion { .. } => { if n_variants != 0 { assert!(n_variants == 1); iter_variant(&cx, ptr, &adt, 0, substs); } } - (adt::BranchKind::Switch, Some(lldiscrim_a)) => { + layout::CEnum { .. } | + layout::General { .. } | + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. 
} => { + let lldiscrim_a = adt::trans_get_discr(&cx, t, ptr.llval, None, false); let tcx = cx.tcx(); drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize)); @@ -511,7 +519,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> } cx = next_cx; } - _ => cx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => bug!("{} is not an enum.", t), } } }, From 901984e1d15343a9eba7ceac8f54011409c54837 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 13:18:38 -0700 Subject: [PATCH 11/18] Builder.build_new_block -> Builder.build_sibling_block --- src/librustc_trans/builder.rs | 14 ++------------ src/librustc_trans/callee.rs | 2 +- src/librustc_trans/cleanup.rs | 2 +- src/librustc_trans/glue.rs | 8 ++++---- src/librustc_trans/intrinsic.rs | 12 ++++++------ src/librustc_trans/mir/mod.rs | 4 ++-- src/librustc_trans/tvec.rs | 6 +++--- 7 files changed, 19 insertions(+), 29 deletions(-) diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 6112e29f72f4d..33e6cc82ad788 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -80,18 +80,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn build_new_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> { - let builder = Builder::with_ccx(self.ccx); - let llbb = unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMAppendBasicBlockInContext( - self.ccx.llcx(), - self.llfn(), - name.as_ptr() - ) - }; - builder.position_at_end(llbb); - builder + pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn(), name) } pub fn sess(&self) -> &Session { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index c99dc7ee5ef4b..ae1086828b542 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -370,7 +370,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llfn = callee.reify(bcx.ccx); let llret; if let 
Some(landing_pad) = self_scope.landing_pad { - let normal_bcx = bcx.build_new_block("normal-return"); + let normal_bcx = bcx.build_sibling_block("normal-return"); llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index d7ac5bee6d85a..5d89a67d3fd80 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -54,7 +54,7 @@ impl<'tcx> DropValue<'tcx> { /// This should only be called once per function, as it creates an alloca for the landingpad. fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef { debug!("get_landing_pad"); - let bcx = bcx.build_new_block("cleanup_unwind"); + let bcx = bcx.build_sibling_block("cleanup_unwind"); let llpersonality = bcx.ccx.eh_personality(); bcx.set_personality_fn(llpersonality); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 0c21573b9c43b..ab048ae6d9de2 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -263,7 +263,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let llret; let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]; if let Some(landing_pad) = contents_scope.landing_pad { - let normal_bcx = bcx.build_new_block("normal-return"); + let normal_bcx = bcx.build_sibling_block("normal-return"); llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { @@ -503,15 +503,15 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. 
- let ret_void_cx = cx.build_new_block("enum-iter-ret-void"); + let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void"); ret_void_cx.ret_void(); let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); - let next_cx = cx.build_new_block("enum-iter-next"); + let next_cx = cx.build_sibling_block("enum-iter-next"); for (i, variant) in adt.variants.iter().enumerate() { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); - let variant_cx = cx.build_new_block(&variant_cx_name); + let variant_cx = cx.build_sibling_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); iter_variant(&variant_cx, ptr, &adt, i, substs); diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 35895e7827e1a..3848f3e0f6fdf 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -718,10 +718,10 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.set_personality_fn(bcx.ccx.eh_personality()); - let normal = bcx.build_new_block("normal"); - let catchswitch = bcx.build_new_block("catchswitch"); - let catchpad = bcx.build_new_block("catchpad"); - let caught = bcx.build_new_block("caught"); + let normal = bcx.build_sibling_block("normal"); + let catchswitch = bcx.build_sibling_block("catchswitch"); + let catchpad = bcx.build_sibling_block("catchpad"); + let caught = bcx.build_sibling_block("caught"); let func = llvm::get_param(bcx.llfn(), 0); let data = llvm::get_param(bcx.llfn(), 1); @@ -837,8 +837,8 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. 
- let then = bcx.build_new_block("then"); - let catch = bcx.build_new_block("catch"); + let then = bcx.build_sibling_block("then"); + let catch = bcx.build_sibling_block("catch"); let func = llvm::get_param(bcx.llfn(), 0); let data = llvm::get_param(bcx.llfn(), 1); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 01fd7979be116..603ded1c2d4ac 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -218,9 +218,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { - bcx.build_new_block("start").llbb() + bcx.build_sibling_block("start").llbb() } else { - bcx.build_new_block(&format!("{:?}", bb)).llbb() + bcx.build_sibling_block(&format!("{:?}", bb)).llbb() } }).collect(); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index f6fc4637e1c06..cbcbb02bdc890 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -29,9 +29,9 @@ pub fn slice_for_each<'a, 'tcx, F>( bcx.inbounds_gep(a, &[b]) }; - let body_bcx = bcx.build_new_block("slice_loop_body"); - let next_bcx = bcx.build_new_block("slice_loop_next"); - let header_bcx = bcx.build_new_block("slice_loop_header"); + let body_bcx = bcx.build_sibling_block("slice_loop_body"); + let next_bcx = bcx.build_sibling_block("slice_loop_next"); + let header_bcx = bcx.build_sibling_block("slice_loop_header"); let start = if zst { C_uint(bcx.ccx, 0usize) From ba37c918310b070ae2dfd24199a9cc01ec60280a Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 13:24:04 -0700 Subject: [PATCH 12/18] Fix style nit --- src/librustc_trans/abi.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 18f433eb16249..ad4bb0fce22ad 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -294,9 +294,7 @@ impl ArgType { } } - pub fn store_fn_arg( - &self, bcx: &Builder, idx: 
&mut usize, dst: ValueRef - ) { + pub fn store_fn_arg(&self, bcx: &Builder, idx: &mut usize, dst: ValueRef) { if self.pad.is_some() { *idx += 1; } From c3fe2590f583e4bb36fd39b7ce32924f696b5081 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 13:51:09 -0700 Subject: [PATCH 13/18] Inline and remove Builder::entry_block --- src/librustc_trans/base.rs | 2 +- src/librustc_trans/builder.rs | 4 ---- src/librustc_trans/callee.rs | 4 ++-- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/intrinsic.rs | 2 +- src/librustc_trans/meth.rs | 2 +- src/librustc_trans/mir/mod.rs | 2 +- 7 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 8b182dabf5101..4cdde24ed48b5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -610,7 +610,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let bcx = Builder::entry_block(ccx, llfn); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); if !fn_ty.ret.is_ignore() { // But if there are no nested returns, we skip the indirection // and have a single retslot diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 33e6cc82ad788..cf7f3e9501d1a 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -51,10 +51,6 @@ fn noname() -> *const c_char { } impl<'a, 'tcx> Builder<'a, 'tcx> { - pub fn entry_block(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef) -> Self { - Builder::new_block(ccx, llfn, "entry-block") - } - pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self { let builder = Builder::with_ccx(ccx); let llbb = unsafe { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index ae1086828b542..257d6c01e4a65 100644 --- a/src/librustc_trans/callee.rs +++ 
b/src/librustc_trans/callee.rs @@ -329,7 +329,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( attributes::set_frame_pointer_elimination(ccx, lloncefn); let orig_fn_ty = fn_ty; - let mut bcx = Builder::entry_block(ccx, lloncefn); + let mut bcx = Builder::new_block(ccx, lloncefn, "entry-block"); let callee = Callee { data: Fn(llreffn), @@ -489,7 +489,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let bcx = Builder::entry_block(ccx, llfn); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); let mut llargs = get_params(llfn); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index ab048ae6d9de2..98377dedb02a6 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -173,7 +173,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty())); let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let mut bcx = Builder::entry_block(ccx, llfn); + let mut bcx = Builder::new_block(ccx, llfn, "entry-block"); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 3848f3e0f6fdf..9429a84e23154 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -883,7 +883,7 @@ fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let bcx = Builder::entry_block(ccx, llfn); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); trans(bcx); llfn } diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index e83b79f11f4dc..aecba2f57e52c 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -76,7 +76,7 @@ pub fn 
trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let bcx = Builder::entry_block(ccx, llfn); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); let mut llargs = get_params(llfn); let fn_ret = callee.ty.fn_ret(); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 603ded1c2d4ac..824ee5b3c36b5 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -210,7 +210,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( debug!("fn_ty: {:?}", fn_ty); let debug_context = debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfn, mir); - let bcx = Builder::entry_block(ccx, llfn); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); let cleanup_kinds = analyze::cleanup_kinds(&mir); From ca328e1bb4629f7a3ef26c23b759781c4b90588c Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 2 Jan 2017 14:47:15 -0700 Subject: [PATCH 14/18] Simplify code further --- src/librustc_trans/mir/block.rs | 18 ++++++------------ src/librustc_trans/mir/lvalue.rs | 19 +++++-------------- 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index a62c25f2afc45..b9e58f79a5141 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -242,20 +242,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } - let lvalue = self.trans_lvalue(&bcx, location); + let mut lvalue = self.trans_lvalue(&bcx, location); let drop_fn = glue::get_drop_glue(bcx.ccx, ty); let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); - let ptr = if bcx.ccx.shared().type_is_sized(ty) { - let value = if drop_ty != ty { - bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) - } else { - lvalue.llval - }; - LvalueRef::new_sized_ty(value, ty) - } else { - LvalueRef::new_unsized_ty(lvalue.llval, 
lvalue.llextra, ty) - }; - let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]; + if bcx.ccx.shared().type_is_sized(ty) && drop_ty != ty { + lvalue.llval = bcx.pointercast( + lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()); + } + let args = &[lvalue.llval, lvalue.llextra][..1 + lvalue.has_extra() as usize]; if let Some(unwind) = unwind { bcx.invoke( drop_fn, diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index a538a16dc95cb..bd6e70639bba5 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -50,13 +50,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { LvalueRef::new_sized(llval, LvalueTy::from_ty(ty)) } - pub fn new_unsized(llval: ValueRef, llextra: ValueRef, ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { - LvalueRef { - llval: llval, - llextra: llextra, - ty: ty, - } - } pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { LvalueRef { llval: llval, @@ -81,7 +74,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { !self.llextra.is_null() } - pub fn struct_field_ptr( + fn struct_field_ptr( self, bcx: &Builder<'a, 'tcx>, st: &layout::Struct, @@ -298,14 +291,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (llprojected, llextra) = match projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); - let base = if is_sized { - LvalueRef::new_sized(tr_base.llval, tr_base.ty) + let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) { + ptr::null_mut() } else { - LvalueRef::new_unsized(tr_base.llval, tr_base.llextra, tr_base.ty) + tr_base.llextra }; - let llprojected = base.trans_field_ptr(bcx, field.index()); - (llprojected, base.llextra) + (tr_base.trans_field_ptr(bcx, field.index()), llextra) } mir::ProjectionElem::Index(ref index) => { let index = self.trans_operand(bcx, index); From d25fc9ec5f41b0b608432e7b9fb45d7377380755 Mon Sep 17 
00:00:00 2001 From: Mark Simulacrum Date: Tue, 3 Jan 2017 20:10:45 -0700 Subject: [PATCH 15/18] Remove extraneous setting of builder positions. --- src/librustc_trans/mir/block.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index b9e58f79a5141..c7fdee90e9975 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -580,14 +580,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some((_, target)) = *destination { let ret_bcx = self.get_builder(target); - ret_bcx.position_at_start(ret_bcx.llbb()); self.set_debug_loc(&ret_bcx, terminator.source_info); let op = OperandRef { val: Immediate(invokeret), ty: sig.output(), }; self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - ret_bcx.position_at_end(ret_bcx.llbb()); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); From 7dadd14d6c671ca5c159acb635f62dbc909e7cc4 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Tue, 3 Jan 2017 20:16:36 -0700 Subject: [PATCH 16/18] Pull out downcasting into caller of iter_variant Renames iter_variant to iter_variant_fields to more clearly communicate the purpose of the function. --- src/librustc_trans/glue.rs | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 98377dedb02a6..ed58b17e0cb63 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -395,22 +395,21 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf } // Iterates through the elements of a structural type, dropping them. 
-fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> Builder<'a, 'tcx> { - fn iter_variant<'a, 'tcx>(cx: &'a Builder<'a, 'tcx>, - av: LvalueRef<'tcx>, - adt_def: &'tcx AdtDef, - variant_index: usize, - substs: &'tcx Substs<'tcx>) { +fn drop_structural_ty<'a, 'tcx>( + cx: Builder<'a, 'tcx>, + mut ptr: LvalueRef<'tcx> +) -> Builder<'a, 'tcx> { + fn iter_variant_fields<'a, 'tcx>( + cx: &'a Builder<'a, 'tcx>, + av: LvalueRef<'tcx>, + adt_def: &'tcx AdtDef, + variant_index: usize, + substs: &'tcx Substs<'tcx> + ) { let variant = &adt_def.variants[variant_index]; let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - let mut av = av.clone(); - av.ty = LvalueTy::Downcast { - adt_def: adt_def, - substs: substs, - variant_index: variant_index, - }; let field_ptr = av.trans_field_ptr(&cx, i); drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg)); } @@ -479,7 +478,12 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> layout::UntaggedUnion { .. } => { if n_variants != 0 { assert!(n_variants == 1); - iter_variant(&cx, ptr, &adt, 0, substs); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: 0, + }; + iter_variant_fields(&cx, ptr, &adt, 0, substs); } } layout::CEnum { .. 
} | @@ -514,7 +518,12 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> let variant_cx = cx.build_sibling_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - iter_variant(&variant_cx, ptr, &adt, i, substs); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: i, + }; + iter_variant_fields(&variant_cx, ptr, &adt, i, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; From 21f86ba1bc4c58706973c88a81d0b2e51190c0b7 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Tue, 3 Jan 2017 20:54:22 -0700 Subject: [PATCH 17/18] Simplify handling of dropping structs. --- src/librustc_trans/common.rs | 52 +----------------------------------- src/librustc_trans/glue.rs | 25 +++++++---------- 2 files changed, 11 insertions(+), 66 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 8dcb835350b37..13163518f941e 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -15,7 +15,6 @@ use llvm; use llvm::{ValueRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::util::common::MemoizationMap; @@ -38,7 +37,7 @@ use std::borrow::Cow; use std::iter; use syntax::ast; -use syntax::symbol::{Symbol, InternedString}; +use syntax::symbol::InternedString; use syntax_pos::Span; use rustc_i128::u128; @@ -169,55 +168,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - * */ -use Disr; - -/// The concrete version of ty::FieldDef. The name is the field index if -/// the field is numeric. 
-pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); - -/// The concrete version of ty::VariantDef -pub struct VariantInfo<'tcx> { - pub discr: Disr, - pub fields: Vec> -} - -impl<'a, 'tcx> VariantInfo<'tcx> { - pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - opt_def: Option) - -> Self - { - match ty.sty { - ty::TyAdt(adt, substs) => { - let variant = match opt_def { - None => adt.struct_variant(), - Some(def) => adt.variant_of_def(def) - }; - - VariantInfo { - discr: Disr::from(variant.disr_val), - fields: variant.fields.iter().map(|f| { - Field(f.name, monomorphize::field_ty(tcx, substs, f)) - }).collect() - } - } - - ty::TyTuple(ref v) => { - VariantInfo { - discr: Disr(0), - fields: v.iter().enumerate().map(|(i, &t)| { - Field(Symbol::intern(&i.to_string()), t) - }).collect() - } - } - - _ => { - bug!("cannot get field types from the type {:?}", ty); - } - } - } -} - /// A structure representing an active landing pad for the duration of a basic /// block. /// diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index ed58b17e0cb63..4fe07c9b86abf 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -13,6 +13,7 @@ // Code relating to drop glue. 
use std; +use std::ptr; use std::iter; use llvm; @@ -444,21 +445,15 @@ fn drop_structural_ty<'a, 'tcx>( } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); - for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let mut ptr = ptr.clone(); - ptr.ty = LvalueTy::Downcast { - adt_def: adt, - substs: substs, - variant_index: Disr::from(discr).0 as usize, - }; - let llfld_a = ptr.trans_field_ptr(&cx, i); - let ptr = if cx.ccx.shared().type_is_sized(field_ty) { - LvalueRef::new_sized_ty(llfld_a, field_ty) - } else { - LvalueRef::new_unsized_ty(llfld_a, ptr.llextra, field_ty) - }; - drop_ty(&cx, ptr); + for (i, field) in adt.variants[0].fields.iter().enumerate() { + let field_ty = monomorphize::field_ty(cx.tcx(), substs, field); + let mut field_ptr = ptr.clone(); + field_ptr.llval = ptr.trans_field_ptr(&cx, i); + field_ptr.ty = LvalueTy::from_ty(field_ty); + if cx.ccx.shared().type_is_sized(field_ty) { + field_ptr.llextra = ptr::null_mut(); + } + drop_ty(&cx, field_ptr); } } AdtKind::Union => { From b01b6e1d5664a1ebaf0e9264be5dddbbc8b4996b Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Wed, 4 Jan 2017 11:47:43 -0700 Subject: [PATCH 18/18] Fix errors introduced during rebase --- src/librustc_trans/intrinsic.rs | 2 +- src/librustc_trans/mir/block.rs | 4 ++-- src/librustc_trans/mir/mod.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 9429a84e23154..842a21e98db46 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -694,7 +694,7 @@ fn try_intrinsic<'a, 'tcx>( bcx.call(func, &[data], None); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, fcx, func, data, local_ptr, dest); + trans_msvc_try(bcx, ccx, func, data, local_ptr, dest); } else { trans_gnu_try(bcx, ccx, func, data, 
local_ptr, dest); } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index c7fdee90e9975..6d92cd99fbeb9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -209,7 +209,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let llscratch = bcx.fcx().alloca(ret.original_ty, "ret"); + let llscratch = bcx.alloca(ret.original_ty, "ret"); self.store_operand(&bcx, llscratch, op, None); llscratch } @@ -651,7 +651,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut llval, by_ref) = match op.val { Immediate(_) | Pair(..) => { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.fcx().alloca(arg.original_ty, "arg"); + let llscratch = bcx.alloca(arg.original_ty, "arg"); self.store_operand(bcx, llscratch, op, None); (llscratch, true) } else { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 824ee5b3c36b5..eedd7956805b6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -519,7 +519,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); + let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr"); bcx.store(llval, alloc, None); alloc } else {