From 1f3faa8987b82a1d2acf9e590a304363ba9c27c2 Mon Sep 17 00:00:00 2001 From: Irina Popa Date: Thu, 2 Aug 2018 17:48:44 +0300 Subject: [PATCH 01/76] rustc_codegen_llvm: begin generalizing over backend values. --- src/librustc_codegen_llvm/abi.rs | 8 ++--- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/base.rs | 4 +-- src/librustc_codegen_llvm/intrinsic.rs | 8 ++--- src/librustc_codegen_llvm/mir/block.rs | 18 +++++----- src/librustc_codegen_llvm/mir/mod.rs | 18 +++++----- src/librustc_codegen_llvm/mir/operand.rs | 44 ++++++++++++------------ src/librustc_codegen_llvm/mir/place.rs | 24 ++++++------- src/librustc_codegen_llvm/mir/rvalue.rs | 6 ++-- 9 files changed, 66 insertions(+), 66 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 7b93d3e795ed8..7f18a27205854 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -167,8 +167,8 @@ impl LlvmType for CastTarget { pub trait ArgTypeExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>); - fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>); + fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>); + fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value>); } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { @@ -182,7 +182,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. 
- fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) { + fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>) { if self.is_ignore() { return; } @@ -237,7 +237,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } - fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) { + fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value>) { let mut next = || { let val = llvm::get_param(bx.llfn(), *idx as c_uint); *idx += 1; diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index f1bb41bcebacf..b267114410734 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -28,7 +28,7 @@ use libc::{c_uint, c_char}; pub fn codegen_inline_asm( bx: &Builder<'a, 'll, 'tcx>, ia: &hir::InlineAsm, - outputs: Vec>, + outputs: Vec>, mut inputs: Vec<&'ll Value> ) -> bool { let mut ext_constraints = vec![]; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 614a562846e86..f45566e1bd48b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -276,8 +276,8 @@ pub fn unsize_thin_ptr( /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into( bx: &Builder<'a, 'll, 'tcx>, - src: PlaceRef<'ll, 'tcx>, - dst: PlaceRef<'ll, 'tcx> + src: PlaceRef<'tcx, &'ll Value>, + dst: PlaceRef<'tcx, &'ll Value> ) { let src_ty = src.layout.ty; let dst_ty = dst.layout.ty; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 272196afa6f92..0e108a1fd13d1 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -90,7 +90,7 @@ pub fn codegen_intrinsic_call( bx: &Builder<'a, 'll, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll 
Value>], llresult: &'ll Value, span: Span, ) { @@ -591,7 +591,7 @@ pub fn codegen_intrinsic_call( fn modify_as_needed( bx: &Builder<'a, 'll, 'tcx>, t: &intrinsics::Type, - arg: &OperandRef<'ll, 'tcx>, + arg: &OperandRef<'tcx, &'ll Value>, ) -> Vec<&'ll Value> { match *t { intrinsics::Type::Aggregate(true, ref contents) => { @@ -983,7 +983,7 @@ fn generic_simd_intrinsic( bx: &Builder<'a, 'll, 'tcx>, name: &str, callee_ty: Ty<'tcx>, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ret_ty: Ty<'tcx>, llret_ty: &'ll Type, span: Span @@ -1158,7 +1158,7 @@ fn generic_simd_intrinsic( in_len: usize, bx: &Builder<'a, 'll, 'tcx>, span: Span, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ) -> Result<&'ll Value, ()> { macro_rules! emit_error { ($msg: tt) => { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 68e30227185c0..570eb40ee68f0 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -115,7 +115,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn_ty: FnType<'tcx, Ty<'tcx>>, fn_ptr: &'ll Value, llargs: &[&'ll Value], - destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest<'tcx, &'ll Value>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { @@ -692,7 +692,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn codegen_argument(&mut self, bx: &Builder<'a, 'll, 'tcx>, - op: OperandRef<'ll, 'tcx>, + op: OperandRef<'tcx, &'ll Value>, llargs: &mut Vec<&'ll Value>, arg: &ArgType<'tcx, Ty<'tcx>>) { // Fill padding with undef value, where applicable. 
@@ -803,7 +803,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> { let cx = bx.cx; if let Some(slot) = self.personality_slot { slot @@ -879,7 +879,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) - -> ReturnDest<'ll, 'tcx> { + -> ReturnDest<'tcx, &'ll Value> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret.is_ignore() { return ReturnDest::Nothing; @@ -963,7 +963,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, src: &mir::Operand<'tcx>, - dst: PlaceRef<'ll, 'tcx>) { + dst: PlaceRef<'tcx, &'ll Value>) { let src = self.codegen_operand(bx, src); let llty = src.layout.llvm_type(bx.cx); let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); @@ -975,7 +975,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // Stores the return value of a function call into it's final location. 
fn store_return(&mut self, bx: &Builder<'a, 'll, 'tcx>, - dest: ReturnDest<'ll, 'tcx>, + dest: ReturnDest<'tcx, &'ll Value>, ret_ty: &ArgType<'tcx, Ty<'tcx>>, llval: &'ll Value) { use self::ReturnDest::*; @@ -1006,13 +1006,13 @@ impl FunctionCx<'a, 'll, 'tcx> { } } -enum ReturnDest<'ll, 'tcx> { +enum ReturnDest<'tcx, V> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(PlaceRef<'ll, 'tcx>), + Store(PlaceRef<'tcx, V>), // Stores an indirect return value to an operand local place - IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local), + IndirectOperand(PlaceRef<'tcx, V>, mir::Local), // Stores a direct return value to an operand local place DirectOperand(mir::Local) } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index a6e2ccf92e4e3..a7208b416a62e 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -63,7 +63,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` blocks: IndexVec, @@ -97,7 +97,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. scopes: IndexVec>, @@ -178,18 +178,18 @@ impl FunctionCx<'a, 'll, 'tcx> { } } -enum LocalRef<'ll, 'tcx> { - Place(PlaceRef<'ll, 'tcx>), +enum LocalRef<'tcx, V> { + Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. 
That's the reason why it is indirect. - UnsizedPlace(PlaceRef<'ll, 'tcx>), - Operand(Option>), + UnsizedPlace(PlaceRef<'tcx, V>), + Operand(Option>), } -impl LocalRef<'ll, 'tcx> { - fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> { +impl LocalRef<'tcx, &'ll Value> { + fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx, &'ll Value> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -436,7 +436,7 @@ fn arg_local_refs( fx: &FunctionCx<'a, 'll, 'tcx>, scopes: &IndexVec>, memory_locals: &BitSet, -) -> Vec> { +) -> Vec> { let mir = fx.mir; let tcx = bx.tcx(); let mut idx = 0; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index ab43531240f3f..176ba8b66ba7e 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -32,16 +32,16 @@ use super::place::PlaceRef; /// uniquely determined by the value's type, but is kept as a /// safety check. #[derive(Copy, Clone, Debug)] -pub enum OperandValue<'ll> { +pub enum OperandValue { /// A reference to the actual operand. The data is guaranteed /// to be valid for the operand's lifetime. /// The second value, if any, is the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. - Ref(&'ll Value, Option<&'ll Value>, Align), + Ref(V, Option, Align), /// A single LLVM value. - Immediate(&'ll Value), + Immediate(V), /// A pair of immediate LLVM values. Used by fat pointers too. - Pair(&'ll Value, &'ll Value) + Pair(V, V) } /// An `OperandRef` is an "SSA" reference to a Rust value, along with @@ -53,23 +53,23 @@ pub enum OperandValue<'ll> { /// directly is sure to cause problems -- use `OperandRef::store` /// instead. #[derive(Copy, Clone)] -pub struct OperandRef<'ll, 'tcx> { +pub struct OperandRef<'tcx, V> { // The value. 
- pub val: OperandValue<'ll>, + pub val: OperandValue, // The layout of value, based on its Rust type. pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'ll, 'tcx> { +impl fmt::Debug for OperandRef<'tcx, &'ll Value> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } -impl OperandRef<'ll, 'tcx> { +impl OperandRef<'tcx, &'ll Value> { pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, - layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> { + layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { assert!(layout.is_zst()); OperandRef { val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), @@ -79,7 +79,7 @@ impl OperandRef<'ll, 'tcx> { pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, val: &'tcx ty::Const<'tcx>) - -> Result, Lrc>> { + -> Result, Lrc>> { let layout = bx.cx.layout_of(val.ty); if layout.is_zst() { @@ -141,7 +141,7 @@ impl OperandRef<'ll, 'tcx> { } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -179,7 +179,7 @@ impl OperandRef<'ll, 'tcx> { pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, llval: &'ll Value, layout: TyLayout<'tcx>) - -> OperandRef<'ll, 'tcx> { + -> OperandRef<'tcx, &'ll Value> { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); @@ -194,7 +194,7 @@ impl OperandRef<'ll, 'tcx> { OperandRef { val, layout } } - pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> { + pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'tcx, &'ll Value> { let field = self.layout.field(bx.cx, i); let offset = 
self.layout.fields.offset(i); @@ -252,27 +252,27 @@ impl OperandRef<'ll, 'tcx> { } } -impl OperandValue<'ll> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { +impl OperandValue<&'ll Value> { + pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } fn store_with_flags( self, bx: &Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, + dest: PlaceRef<'tcx, &'ll Value>, flags: MemFlags, ) { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); @@ -337,7 +337,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn maybe_codegen_consume_direct(&mut self, bx: &Builder<'a, 'll, 'tcx>, place: &mir::Place<'tcx>) - -> Option> + -> Option> { debug!("maybe_codegen_consume_direct(place={:?})", place); @@ -385,7 +385,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_consume(&mut self, bx: &Builder<'a, 'll, 'tcx>, place: &mir::Place<'tcx>) - -> OperandRef<'ll, 'tcx> + -> OperandRef<'tcx, &'ll Value> { debug!("codegen_consume(place={:?})", place); @@ -409,7 +409,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_operand(&mut self, bx: &Builder<'a, 'll, 'tcx>, operand: &mir::Operand<'tcx>) - -> OperandRef<'ll, 'tcx> + 
-> OperandRef<'tcx, &'ll Value> { debug!("codegen_operand(operand={:?})", operand); diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index e7b6f5908a4d1..603a5de4f6486 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -27,12 +27,12 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; #[derive(Copy, Clone, Debug)] -pub struct PlaceRef<'ll, 'tcx> { +pub struct PlaceRef<'tcx, V> { /// Pointer to the contents of the place - pub llval: &'ll Value, + pub llval: V, /// This place's extra data if it is unsized, or null - pub llextra: Option<&'ll Value>, + pub llextra: Option, /// Monomorphized type of this place, including variant information pub layout: TyLayout<'tcx>, @@ -41,12 +41,12 @@ pub struct PlaceRef<'ll, 'tcx> { pub align: Align, } -impl PlaceRef<'ll, 'tcx> { +impl PlaceRef<'tcx, &'ll Value> { pub fn new_sized( llval: &'ll Value, layout: TyLayout<'tcx>, align: Align, - ) -> PlaceRef<'ll, 'tcx> { + ) -> PlaceRef<'tcx, &'ll Value> { assert!(!layout.is_unsized()); PlaceRef { llval, @@ -61,7 +61,7 @@ impl PlaceRef<'ll, 'tcx> { layout: TyLayout<'tcx>, alloc: &mir::interpret::Allocation, offset: Size, - ) -> PlaceRef<'ll, 'tcx> { + ) -> PlaceRef<'tcx, &'ll Value> { let init = const_alloc_to_llvm(bx.cx, alloc); let base_addr = consts::addr_of(bx.cx, init, layout.align, None); @@ -75,7 +75,7 @@ impl PlaceRef<'ll, 'tcx> { } pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + -> PlaceRef<'tcx, &'ll Value> { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); @@ -105,7 +105,7 @@ impl PlaceRef<'ll, 'tcx> { } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> { + pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll 
Value> { debug!("PlaceRef::load: {:?}", self); assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); @@ -169,7 +169,7 @@ impl PlaceRef<'ll, 'tcx> { } /// Access a field, at a point when the value's case is known. - pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> { + pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'tcx, &'ll Value> { let cx = bx.cx; let field = self.layout.field(cx, ix); let offset = self.layout.fields.offset(ix); @@ -392,7 +392,7 @@ impl PlaceRef<'ll, 'tcx> { } pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) - -> PlaceRef<'ll, 'tcx> { + -> PlaceRef<'tcx, &'ll Value> { PlaceRef { llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), llextra: None, @@ -402,7 +402,7 @@ impl PlaceRef<'ll, 'tcx> { } pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) - -> PlaceRef<'ll, 'tcx> { + -> PlaceRef<'tcx, &'ll Value> { let mut downcast = *self; downcast.layout = self.layout.for_variant(bx.cx, variant_index); @@ -426,7 +426,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_place(&mut self, bx: &Builder<'a, 'll, 'tcx>, place: &mir::Place<'tcx>) - -> PlaceRef<'ll, 'tcx> { + -> PlaceRef<'tcx, &'ll Value> { debug!("codegen_place(place={:?})", place); let cx = bx.cx; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index fa22bdff94ddd..b6debd961c71f 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -35,7 +35,7 @@ use super::place::PlaceRef; impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_rvalue(&mut self, bx: Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, + dest: PlaceRef<'tcx, &'ll Value>, rvalue: &mir::Rvalue<'tcx>) -> Builder<'a, 'll, 'tcx> { @@ -201,7 +201,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_rvalue_operand(&mut self, bx: Builder<'a, 'll, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 
'll, 'tcx>, OperandRef<'ll, 'tcx>) + -> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx, &'ll Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); @@ -677,7 +677,7 @@ impl FunctionCx<'a, 'll, 'tcx> { op: mir::BinOp, lhs: &'ll Value, rhs: &'ll Value, - input_ty: Ty<'tcx>) -> OperandValue<'ll> { + input_ty: Ty<'tcx>) -> OperandValue<&'ll Value> { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), From 67f94d87d2d68c9b0742f309d848dfe87fcbaf55 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 3 Aug 2018 14:20:10 +0200 Subject: [PATCH 02/76] Reduced line length to pass tidy Generalized FunctionCx Added ValueTrait and first change Generalize CondegenCx Generalized the Builder struct defined in librustc_codegen_llvm/builder.rs --- src/librustc_codegen_llvm/abi.rs | 55 +++++++++------ src/librustc_codegen_llvm/asm.rs | 4 +- src/librustc_codegen_llvm/attributes.rs | 6 +- src/librustc_codegen_llvm/base.rs | 34 ++++----- src/librustc_codegen_llvm/builder.rs | 18 +++-- src/librustc_codegen_llvm/callee.rs | 4 +- src/librustc_codegen_llvm/common.rs | 40 ++++++----- src/librustc_codegen_llvm/consts.rs | 12 ++-- src/librustc_codegen_llvm/context.rs | 43 ++++++------ .../debuginfo/create_scope_map.rs | 5 +- src/librustc_codegen_llvm/debuginfo/gdb.rs | 6 +- .../debuginfo/metadata.rs | 70 +++++++++---------- src/librustc_codegen_llvm/debuginfo/mod.rs | 14 ++-- .../debuginfo/namespace.rs | 5 +- .../debuginfo/source_loc.rs | 5 +- .../debuginfo/type_names.rs | 9 +-- src/librustc_codegen_llvm/debuginfo/utils.rs | 11 +-- src/librustc_codegen_llvm/declare.rs | 29 +++++--- src/librustc_codegen_llvm/glue.rs | 2 +- src/librustc_codegen_llvm/intrinsic.rs | 43 ++++++------ src/librustc_codegen_llvm/meth.rs | 6 +- src/librustc_codegen_llvm/mir/analyze.rs | 13 ++-- src/librustc_codegen_llvm/mir/block.rs | 26 +++---- 
src/librustc_codegen_llvm/mir/constant.rs | 14 ++-- src/librustc_codegen_llvm/mir/mod.rs | 30 ++++---- src/librustc_codegen_llvm/mir/operand.rs | 41 ++++++----- src/librustc_codegen_llvm/mir/place.rs | 36 ++++++---- src/librustc_codegen_llvm/mir/rvalue.rs | 24 +++---- src/librustc_codegen_llvm/mir/statement.rs | 7 +- src/librustc_codegen_llvm/mono_item.rs | 9 +-- src/librustc_codegen_llvm/type_.rs | 59 +++++++++------- src/librustc_codegen_llvm/type_of.rs | 27 +++---- src/librustc_codegen_llvm/value.rs | 4 ++ 33 files changed, 392 insertions(+), 319 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 7f18a27205854..90f748fdec036 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -104,11 +104,11 @@ impl ArgAttributesExt for ArgAttributes { } pub trait LlvmType { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type; + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type; } impl LlvmType for Reg { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { match self.kind { RegKind::Integer => Type::ix(cx, self.size.bits()), RegKind::Float => { @@ -126,7 +126,7 @@ impl LlvmType for Reg { } impl LlvmType for CastTarget { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { let rest_ll_unit = self.rest.unit.llvm_type(cx); let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 { (0, 0) @@ -166,15 +166,19 @@ impl LlvmType for CastTarget { } pub trait ArgTypeExt<'ll, 'tcx> { - fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>); - fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value>); + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> 
&'ll Type; + fn store(&self, bx: &Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>); + fn store_fn_arg( + &self, + bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value> + ); } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// Get the LLVM type for a place of the original Rust type of /// this argument/return, i.e. the result of `type_of::type_of`. - fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type { self.layout.llvm_type(cx) } @@ -182,7 +186,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>) { + fn store(&self, bx: &Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>) { if self.is_ignore() { return; } @@ -237,7 +241,12 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } - fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value>) { + fn store_fn_arg( + &self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value> + ) { let mut next = || { let val = llvm::get_param(bx.llfn(), *idx as c_uint); *idx += 1; @@ -259,37 +268,37 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } pub trait FnTypeExt<'tcx> { - fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self; - fn new(cx: &CodegenCx<'ll, 'tcx>, + fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>) -> Self; + fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; - fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + 
fn new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; fn new_internal( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option) -> ArgType<'tcx, Ty<'tcx>>, ) -> Self; fn adjust_for_abi(&mut self, - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, abi: Abi); - fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value); + fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { - fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self { + fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>) -> Self { let fn_ty = instance.ty(cx.tcx); let sig = ty_fn_sig(cx, fn_ty); let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); FnType::new(cx, sig, &[]) } - fn new(cx: &CodegenCx<'ll, 'tcx>, + fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { FnType::new_internal(cx, sig, extra_args, |ty, _| { @@ -297,7 +306,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { }) } - fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + fn new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| { @@ -324,7 +333,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } fn new_internal( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option) -> 
ArgType<'tcx, Ty<'tcx>>, @@ -505,7 +514,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } fn adjust_for_abi(&mut self, - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, abi: Abi) { if abi == Abi::Unadjusted { return } @@ -575,7 +584,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type { let args_capacity: usize = self.args.iter().map(|arg| if arg.pad.is_some() { 1 } else { 0 } + if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 } @@ -681,7 +690,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index b267114410734..f9944f6956857 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -26,7 +26,7 @@ use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM pub fn codegen_inline_asm( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, ia: &hir::InlineAsm, outputs: Vec>, mut inputs: Vec<&'ll Value> @@ -125,7 +125,7 @@ pub fn codegen_inline_asm( return true; } -pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, ga: &hir::GlobalAsm) { let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); unsafe { diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index f45b3728bc1b0..14f37d7584002 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ 
b/src/librustc_codegen_llvm/attributes.rs @@ -76,7 +76,7 @@ pub fn naked(val: &'ll Value, is_naked: bool) { Attribute::Naked.toggle_llfn(Function, val, is_naked); } -pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { if cx.sess().must_not_eliminate_frame_pointers() { llvm::AddFunctionAttrStringValue( llfn, llvm::AttributePlace::Function, @@ -84,7 +84,7 @@ pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) } } -pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn set_probestack(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { // Only use stack probes if the target specification indicates that we // should be using stack probes if !cx.sess().target.target.options.stack_probes { @@ -149,7 +149,7 @@ pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) { /// Composite function which sets LLVM attributes for function depending on its AST (#[attribute]) /// attributes. 
pub fn from_fn_attrs( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value, id: Option, ) { diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index f45566e1bd48b..c1ae0586a308b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -94,13 +94,13 @@ use mir::operand::OperandValue; use rustc_codegen_utils::check_for_rustc_errors_attr; pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, name: Option, istart: usize, } impl StatRecorder<'a, 'll, 'tcx> { - pub fn new(cx: &'a CodegenCx<'ll, 'tcx>, name: String) -> Self { + pub fn new(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, name: String) -> Self { let istart = cx.stats.borrow().n_llvm_insns; StatRecorder { cx, @@ -158,7 +158,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { } pub fn compare_simd_types( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, lhs: &'ll Value, rhs: &'ll Value, t: Ty<'tcx>, @@ -190,7 +190,7 @@ pub fn compare_simd_types( /// in an upcast, where the new vtable for an object will be derived /// from the old one. pub fn unsized_info( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, source: Ty<'tcx>, target: Ty<'tcx>, old_info: Option<&'ll Value>, @@ -220,7 +220,7 @@ pub fn unsized_info( /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
pub fn unsize_thin_ptr( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, src: &'ll Value, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> @@ -275,7 +275,7 @@ pub fn unsize_thin_ptr( /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, src: PlaceRef<'tcx, &'ll Value>, dst: PlaceRef<'tcx, &'ll Value> ) { @@ -334,7 +334,7 @@ pub fn coerce_unsized_into( } pub fn cast_shift_expr_rhs( - cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value + cx: &Builder<'_, 'll, '_, &'ll Value>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value ) -> &'ll Value { cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) } @@ -383,12 +383,12 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) { +pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); bx.call(assume_intrinsic, &[val], None); } -pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value { +pub fn from_immediate(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) -> &'ll Value { if val_ty(val) == Type::i1(bx.cx) { bx.zext(val, Type::i8(bx.cx)) } else { @@ -397,7 +397,7 @@ pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value } pub fn to_immediate( - bx: &Builder<'_, 'll, '_>, + bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value, layout: layout::TyLayout, ) -> &'ll Value { @@ -408,7 +408,7 @@ pub fn to_immediate( } pub fn to_immediate_scalar( - bx: &Builder<'_, 'll, '_>, + bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value, scalar: &layout::Scalar, ) -> &'ll Value { @@ -419,7 +419,7 @@ pub fn to_immediate_scalar( } pub fn call_memcpy( - bx: &Builder<'_, 
'll, '_>, + bx: &Builder<'_, 'll, '_, &'ll Value>, dst: &'ll Value, src: &'ll Value, n_bytes: &'ll Value, @@ -446,7 +446,7 @@ pub fn call_memcpy( } pub fn memcpy_ty( - bx: &Builder<'_, 'll, 'tcx>, + bx: &Builder<'_, 'll, 'tcx, &'ll Value>, dst: &'ll Value, src: &'ll Value, layout: TyLayout<'tcx>, @@ -462,7 +462,7 @@ pub fn memcpy_ty( } pub fn call_memset( - bx: &Builder<'_, 'll, '_>, + bx: &Builder<'_, 'll, '_, &'ll Value>, ptr: &'ll Value, fill_byte: &'ll Value, size: &'ll Value, @@ -476,7 +476,7 @@ pub fn call_memset( bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } -pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) { +pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>) { let _s = if cx.sess().codegen_stats() { let mut instance_name = String::new(); DefPathBasedNames::new(cx.tcx, true, true) @@ -517,7 +517,7 @@ pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { /// Create the `main` function which will initialize the rust runtime and call /// users main function. 
-fn maybe_create_entry_wrapper(cx: &CodegenCx) { +fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { Some((id, span, _)) => { (cx.tcx.hir.local_def_id(id), span) @@ -543,7 +543,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) { } fn create_entry_fn( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, sp: Span, rust_main: &'ll Value, rust_main_def_id: DefId, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 169bd9a8466a0..9e929d5b722fc 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -26,12 +26,12 @@ use std::ptr; // All Builders must have an llfn associated with them #[must_use] -pub struct Builder<'a, 'll: 'a, 'tcx: 'll> { +pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V : 'll> { pub llbuilder: &'ll mut llvm::Builder<'ll>, - pub cx: &'a CodegenCx<'ll, 'tcx>, + pub cx: &'a CodegenCx<'ll, 'tcx, V>, } -impl Drop for Builder<'a, 'll, 'tcx> { +impl Drop for Builder<'_, '_, '_, V> { fn drop(&mut self) { unsafe { llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _)); @@ -54,8 +54,12 @@ bitflags! { } } -impl Builder<'a, 'll, 'tcx> { - pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { +impl Builder<'a, 'll, 'tcx, &'ll Value> { + pub fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + llfn: &'ll Value, + name: &'b str + ) -> Self { let bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); @@ -69,7 +73,7 @@ impl Builder<'a, 'll, 'tcx> { bx } - pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -80,7 +84,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> { + pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx, &'ll Value> { Builder::new_block(self.cx, self.llfn(), name) } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 4b4ccb3b600b3..4d81c2894345b 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -36,7 +36,7 @@ use rustc::ty::subst::Substs; /// - `cx`: the crate context /// - `instance`: the instance to be instantiated pub fn get_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>, ) -> &'ll Value { let tcx = cx.tcx; @@ -206,7 +206,7 @@ pub fn get_fn( } pub fn resolve_and_get_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, def_id: DefId, substs: &'tcx Substs<'tcx>, ) -> &'ll Value { diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index c08937fa9b916..09c3a86a437a6 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -150,23 +150,23 @@ pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { } } -pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value { +pub fn C_bool(cx: &CodegenCx<'ll, '_, &'ll Value>, val: bool) -> &'ll Value { C_uint(Type::i1(cx), val as u64) } -pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value { +pub fn C_i32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: i32) -> &'ll Value { C_int(Type::i32(cx), i as i64) } -pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value { +pub fn C_u32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u32) -> &'ll Value { C_uint(Type::i32(cx), i as u64) } -pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { +pub fn C_u64(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { 
C_uint(Type::i64(cx), i) } -pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { +pub fn C_usize(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { let bit_size = cx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow @@ -176,7 +176,7 @@ pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { C_uint(cx.isize_ty, i) } -pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value { +pub fn C_u8(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u8) -> &'ll Value { C_uint(Type::i8(cx), i as u64) } @@ -184,7 +184,7 @@ pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value { // This is a 'c-like' raw string, which differs from // our boxed-and-length-annotated strings. pub fn C_cstr( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, s: LocalInternedString, null_terminated: bool, ) -> &'ll Value { @@ -212,20 +212,28 @@ pub fn C_cstr( // NB: Do not use `do_spill_noroot` to make this into a constant string, or // you will be kicked off fast isel. See issue #4352 for an example of this. 
-pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value { +pub fn C_str_slice(cx: &CodegenCx<'ll, '_, &'ll Value>, s: LocalInternedString) -> &'ll Value { let len = s.len(); let cs = consts::ptrcast(C_cstr(cx, s, false), cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); C_fat_ptr(cx, cs, C_usize(cx, len as u64)) } -pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value { +pub fn C_fat_ptr( + cx: &CodegenCx<'ll, '_, &'ll Value>, + ptr: &'ll Value, + meta: &'ll Value +) -> &'ll Value { assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); C_struct(cx, &[ptr, meta], false) } -pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value { +pub fn C_struct( + cx: &CodegenCx<'ll, '_, &'ll Value>, + elts: &[&'ll Value], + packed: bool +) -> &'ll Value { C_struct_in_context(cx.llcx, elts, packed) } @@ -253,7 +261,7 @@ pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { } } -pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value { +pub fn C_bytes(cx: &CodegenCx<'ll, '_, &'ll Value>, bytes: &[u8]) -> &'ll Value { C_bytes_in_context(cx.llcx, bytes) } @@ -351,7 +359,7 @@ pub fn langcall(tcx: TyCtxt, // of Java. (See related discussion on #1877 and #10183.) 
pub fn build_unchecked_lshift( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, lhs: &'ll Value, rhs: &'ll Value ) -> &'ll Value { @@ -362,7 +370,7 @@ pub fn build_unchecked_lshift( } pub fn build_unchecked_rshift( - bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value ) -> &'ll Value { let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); // #1877, #10183: Ensure that input is always valid @@ -375,13 +383,13 @@ pub fn build_unchecked_rshift( } } -fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value { +fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, rhs: &'ll Value) -> &'ll Value { let rhs_llty = val_ty(rhs); bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) } pub fn shift_mask_val( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llty: &'ll Type, mask_llty: &'ll Type, invert: bool @@ -405,7 +413,7 @@ pub fn shift_mask_val( } } -pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, ty: Ty<'tcx>) -> ty::PolyFnSig<'tcx> { diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 9228870bf3a5c..f41808f675af7 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -43,7 +43,7 @@ pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { } } -fn set_global_alignment(cx: &CodegenCx<'ll, '_>, +fn set_global_alignment(cx: &CodegenCx<'ll, '_, &'ll Value>, gv: &'ll Value, mut align: Align) { // The target may require greater alignment for globals than the type does. 
@@ -63,7 +63,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, } pub fn addr_of_mut( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, cv: &'ll Value, align: Align, kind: Option<&str>, @@ -88,7 +88,7 @@ pub fn addr_of_mut( } pub fn addr_of( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, cv: &'ll Value, align: Align, kind: Option<&str>, @@ -112,7 +112,7 @@ pub fn addr_of( gv } -pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value { +pub fn get_static(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll Value { let instance = Instance::mono(cx.tcx, def_id); if let Some(&g) = cx.instances.borrow().get(&instance) { return g; @@ -234,7 +234,7 @@ pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value { } fn check_and_apply_linkage( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: LocalInternedString, @@ -293,7 +293,7 @@ fn check_and_apply_linkage( } pub fn codegen_static<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, def_id: DefId, is_mutable: bool, ) { diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 826df82193a31..443e62718e4e6 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -45,7 +45,7 @@ use abi::Abi; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. 
-pub struct CodegenCx<'a, 'tcx: 'a> { +pub struct CodegenCx<'a, 'tcx: 'a, V> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, pub check_overflow: bool, pub use_dll_storage_attrs: bool, @@ -57,12 +57,11 @@ pub struct CodegenCx<'a, 'tcx: 'a> { pub codegen_unit: Arc>, /// Cache instances of monomorphic and polymorphic items - pub instances: RefCell, &'a Value>>, + pub instances: RefCell, V>>, /// Cache generated vtables - pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), - &'a Value>>, + pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), V>>, /// Cache of constant strings, - pub const_cstr_cache: RefCell>, + pub const_cstr_cache: RefCell>, /// Reverse-direction for const ptrs cast from globals. /// Key is a Value holding a *T, @@ -72,20 +71,20 @@ pub struct CodegenCx<'a, 'tcx: 'a> { /// when we ptrcast, and we have to ptrcast during codegen /// of a [T] const because we form a slice, a (*T,usize) pair, not /// a pointer to an LLVM array type. Similar for trait objects. - pub const_unsized: RefCell>, + pub const_unsized: RefCell>, /// Cache of emitted const globals (value -> global) - pub const_globals: RefCell>, + pub const_globals: RefCell>, /// List of globals for static variables which need to be passed to the /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete. /// (We have to make sure we don't invalidate any Values referring /// to constants.) 
- pub statics_to_rauw: RefCell>, + pub statics_to_rauw: RefCell>, /// Statics that will be placed in the llvm.used variable /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details - pub used_statics: RefCell>, + pub used_statics: RefCell>, pub lltypes: RefCell, Option), &'a Type>>, pub scalar_lltypes: RefCell, &'a Type>>, @@ -94,17 +93,17 @@ pub struct CodegenCx<'a, 'tcx: 'a> { pub dbg_cx: Option>, - eh_personality: Cell>, - eh_unwind_resume: Cell>, - pub rust_try_fn: Cell>, + eh_personality: Cell>, + eh_unwind_resume: Cell>, + pub rust_try_fn: Cell>, - intrinsics: RefCell>, + intrinsics: RefCell>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell, } -impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> { +impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx, &'a Value> { } pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { @@ -218,11 +217,11 @@ pub unsafe fn create_module( llmod } -impl<'a, 'tcx> CodegenCx<'a, 'tcx> { +impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, codegen_unit: Arc>, llvm_module: &'a ::ModuleLlvm) - -> CodegenCx<'a, 'tcx> { + -> CodegenCx<'a, 'tcx, &'a Value> { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these @@ -316,7 +315,7 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> { } } -impl<'b, 'tcx> CodegenCx<'b, 'tcx> { +impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { pub fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } @@ -446,25 +445,25 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> { } } -impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> { +impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.tcx.data_layout } } -impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> { +impl HasTargetSpec for &'a 
CodegenCx<'ll, 'tcx, &'ll Value> { fn target_spec(&self) -> &Target { &self.tcx.sess.target.target } } -impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> { +impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { self.tcx } } -impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { +impl LayoutOf for &'a CodegenCx<'ll, 'tcx, &'ll Value> { type Ty = Ty<'tcx>; type TyLayout = TyLayout<'tcx>; @@ -479,7 +478,7 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { } /// Declare any llvm intrinsics that you might need -fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> { +fn declare_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, key: &str) -> Option<&'ll Value> { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 56352ae963f20..9582f175ceecd 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -16,6 +16,7 @@ use llvm; use llvm::debuginfo::DIScope; use common::CodegenCx; use rustc::mir::{Mir, SourceScope}; +use value::Value; use libc::c_uint; @@ -44,7 +45,7 @@ impl MirDebugScope<'ll> { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. 
pub fn create_mir_scopes( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, debug_context: &FunctionDebugContext<'ll>, ) -> IndexVec> { @@ -79,7 +80,7 @@ pub fn create_mir_scopes( scopes } -fn make_mir_scope(cx: &CodegenCx<'ll, '_>, +fn make_mir_scope(cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, has_variables: &BitSet, debug_context: &FunctionDebugContextData<'ll>, diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index f6faddb894ffd..627260604d503 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -24,7 +24,7 @@ use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { +pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll, '_, &'ll Value>) { if needs_gdb_debug_scripts_section(bx.cx) { let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); // Load just the first byte as that's all that's necessary to force @@ -40,7 +40,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { /// Allocates the global variable responsible for the .debug_gdb_scripts binary /// section. 
-pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) +pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Value { let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0"; let section_var_name = &c_section_var_name[..c_section_var_name.len()-1]; @@ -75,7 +75,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) }) } -pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool { +pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'ll, '_, &'ll Value>) -> bool { let omit_gdb_pretty_printer_section = attr::contains_name(&cx.tcx.hir.krate_attrs(), "omit_gdb_pretty_printer_section"); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 846d505641103..5629cf5a591d8 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -152,7 +152,7 @@ impl TypeMap<'ll, 'tcx> { // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given // type has been requested before, this is just a table lookup. Otherwise an // ID will be generated and stored for later lookup. - fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>, + fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx, &'a Value>, type_: Ty<'tcx>) -> UniqueTypeId { // Let's see if we already have something in the cache if let Some(unique_type_id) = self.type_to_unique_id.get(&type_).cloned() { @@ -182,7 +182,7 @@ impl TypeMap<'ll, 'tcx> { // types of their own, so they need special handling. We still need a // UniqueTypeId for them, since to debuginfo they *are* real types. 
fn get_unique_type_id_of_enum_variant<'a>(&mut self, - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, enum_type: Ty<'tcx>, variant_name: &str) -> UniqueTypeId { @@ -211,7 +211,7 @@ enum RecursiveTypeDescription<'ll, 'tcx> { } fn create_and_register_recursive_type_forward_declaration( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: &'ll DICompositeType, @@ -235,7 +235,7 @@ impl RecursiveTypeDescription<'ll, 'tcx> { // Finishes up the description of the type in question (mostly by providing // descriptions of the fields of the given type) and returns the final type // metadata. - fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> { + fn finalize(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> MetadataCreationResult<'ll> { match *self { FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false), UnfinishedMetadata { @@ -288,7 +288,7 @@ macro_rules! return_if_metadata_created_in_meantime { } fn fixed_vec_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unique_type_id: UniqueTypeId, array_or_slice_type: Ty<'tcx>, element_type: Ty<'tcx>, @@ -325,7 +325,7 @@ fn fixed_vec_metadata( } fn vec_slice_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, slice_ptr_type: Ty<'tcx>, element_type: Ty<'tcx>, unique_type_id: UniqueTypeId, @@ -375,7 +375,7 @@ fn vec_slice_metadata( } fn subroutine_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unique_type_id: UniqueTypeId, signature: ty::PolyFnSig<'tcx>, span: Span, @@ -417,7 +417,7 @@ fn subroutine_type_metadata( // of a DST struct, there is no trait_object_type and the results of this // function will be a little bit weird. 
fn trait_pointer_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, trait_type: Ty<'tcx>, trait_object_type: Option>, unique_type_id: UniqueTypeId, @@ -480,7 +480,7 @@ fn trait_pointer_metadata( } pub fn type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>, usage_site_span: Span, ) -> &'ll DIType { @@ -700,7 +700,7 @@ pub fn type_metadata( metadata } -pub fn file_metadata(cx: &CodegenCx<'ll, '_>, +pub fn file_metadata(cx: &CodegenCx<'ll, '_, &'ll Value>, file_name: &FileName, defining_crate: CrateNum) -> &'ll DIFile { debug!("file_metadata: file_name: {}, defining_crate: {}", @@ -718,11 +718,11 @@ pub fn file_metadata(cx: &CodegenCx<'ll, '_>, file_metadata_raw(cx, &file_name.to_string(), &directory.to_string_lossy()) } -pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { +pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll DIFile { file_metadata_raw(cx, "", "") } -fn file_metadata_raw(cx: &CodegenCx<'ll, '_>, +fn file_metadata_raw(cx: &CodegenCx<'ll, '_, &'ll Value>, file_name: &str, directory: &str) -> &'ll DIFile { @@ -748,7 +748,7 @@ fn file_metadata_raw(cx: &CodegenCx<'ll, '_>, file_metadata } -fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { +fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>) -> &'ll DIType { debug!("basic_type_metadata: {:?}", t); let (name, encoding) = match t.sty { @@ -784,7 +784,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { } fn foreign_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>, unique_type_id: UniqueTypeId, ) -> &'ll DIType { @@ -795,7 +795,7 @@ fn foreign_type_metadata( } fn pointer_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, pointer_type: Ty<'tcx>, pointee_type_metadata: &'ll DIType, ) -> &'ll DIType { @@ -929,7 +929,7 @@ enum 
MemberDescriptionFactory<'ll, 'tcx> { } impl MemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { match *self { StructMDF(ref this) => { @@ -963,7 +963,7 @@ struct StructMemberDescriptionFactory<'tcx> { } impl<'tcx> StructMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let layout = cx.layout_of(self.ty); self.variant.fields.iter().enumerate().map(|(i, f)| { @@ -988,7 +988,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { fn prepare_struct_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, struct_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span, @@ -1033,7 +1033,7 @@ struct TupleMemberDescriptionFactory<'tcx> { } impl<'tcx> TupleMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let layout = cx.layout_of(self.ty); self.component_types.iter().enumerate().map(|(i, &component_type)| { @@ -1051,7 +1051,7 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { } fn prepare_tuple_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, tuple_type: Ty<'tcx>, component_types: &[Ty<'tcx>], unique_type_id: UniqueTypeId, @@ -1087,7 +1087,7 @@ struct UnionMemberDescriptionFactory<'tcx> { } impl<'tcx> UnionMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { self.variant.fields.iter().enumerate().map(|(i, f)| { let field = self.layout.field(cx, i); @@ -1105,7 +1105,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { } fn prepare_union_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: 
&CodegenCx<'ll, 'tcx, &'ll Value>, union_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span, @@ -1156,7 +1156,7 @@ struct EnumMemberDescriptionFactory<'ll, 'tcx> { } impl EnumMemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let adt = &self.enum_type.ty_adt_def().unwrap(); match self.layout.variants { @@ -1241,7 +1241,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { // of discriminant instead of us having to recover its path. // Right now it's not even going to work for `niche_start > 0`, // and for multiple niche variants it only supports the first. - fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, name: &mut String, layout: TyLayout<'tcx>, offset: Size, @@ -1291,7 +1291,7 @@ struct VariantMemberDescriptionFactory<'ll, 'tcx> { } impl VariantMemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { self.args.iter().enumerate().map(|(i, &(ref name, ty))| { let (size, align) = cx.size_and_align_of(ty); @@ -1322,7 +1322,7 @@ enum EnumDiscriminantInfo<'ll> { // descriptions of the fields of the variant. This is a rudimentary version of a // full RecursiveTypeDescription. 
fn describe_enum_variant( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, layout: layout::TyLayout<'tcx>, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo<'ll>, @@ -1384,7 +1384,7 @@ fn describe_enum_variant( } fn prepare_enum_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, enum_type: Ty<'tcx>, enum_def_id: DefId, unique_type_id: UniqueTypeId, @@ -1502,7 +1502,7 @@ fn prepare_enum_metadata( }), ); - fn get_enum_discriminant_name(cx: &CodegenCx, + fn get_enum_discriminant_name(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> InternedString { cx.tcx.item_name(def_id) @@ -1514,7 +1514,7 @@ fn prepare_enum_metadata( /// /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. fn composite_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, composite_type: Ty<'tcx>, composite_type_name: &str, composite_type_unique_id: UniqueTypeId, @@ -1540,7 +1540,7 @@ fn composite_type_metadata( composite_type_metadata } -fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, +fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_, &'ll Value>, composite_type_metadata: &'ll DICompositeType, member_descriptions: Vec>) { // In some rare cases LLVM metadata uniquing would lead to an existing type @@ -1591,7 +1591,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). 
fn create_struct_stub( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, struct_type: Ty<'tcx>, struct_type_name: &str, unique_type_id: UniqueTypeId, @@ -1629,7 +1629,7 @@ fn create_struct_stub( } fn create_union_stub( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, union_type: Ty<'tcx>, union_type_name: &str, unique_type_id: UniqueTypeId, @@ -1668,7 +1668,7 @@ fn create_union_stub( /// /// Adds the created metadata nodes directly to the crate's IR. pub fn create_global_var_metadata( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId, global: &'ll Value, ) { @@ -1730,7 +1730,7 @@ pub fn create_global_var_metadata( // Creates an "extension" of an existing DIScope into another file. pub fn extend_scope_to_file( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, scope_metadata: &'ll DIScope, file: &syntax_pos::SourceFile, defining_crate: CrateNum, @@ -1749,7 +1749,7 @@ pub fn extend_scope_to_file( /// /// Adds the created metadata nodes directly to the crate's IR. pub fn create_vtable_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, ty: ty::Ty<'tcx>, vtable: &'ll Value, ) { diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 042e72e921ece..532ec589811c4 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -158,7 +158,7 @@ pub enum VariableKind { } /// Create any deferred debug metadata nodes -pub fn finalize(cx: &CodegenCx) { +pub fn finalize(cx: &CodegenCx<'ll, '_, &'ll Value>) { if cx.dbg_cx.is_none() { return; } @@ -209,7 +209,7 @@ pub fn finalize(cx: &CodegenCx) { /// FunctionDebugContext enum which indicates why no debuginfo should be created /// for the function. 
pub fn create_function_debug_context( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, llfn: &'ll Value, @@ -310,7 +310,7 @@ pub fn create_function_debug_context( return FunctionDebugContext::RegularContext(fn_debug_context); fn get_function_signature( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, ) -> &'ll DIArray { if cx.sess().opts.debuginfo == DebugInfo::Limited { @@ -373,7 +373,7 @@ pub fn create_function_debug_context( } fn get_template_parameters( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, generics: &ty::Generics, substs: &Substs<'tcx>, file_metadata: &'ll DIFile, @@ -429,7 +429,7 @@ pub fn create_function_debug_context( create_DIArray(DIB(cx), &template_params[..]) } - fn get_parameter_names(cx: &CodegenCx, + fn get_parameter_names(cx: &CodegenCx<'ll, '_, &'ll Value>, generics: &ty::Generics) -> Vec { let mut names = generics.parent.map_or(vec![], |def_id| { @@ -440,7 +440,7 @@ pub fn create_function_debug_context( } fn get_containing_scope( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>, ) -> &'ll DIScope { // First, let's see if this is a method within an inherent impl. 
Because @@ -483,7 +483,7 @@ pub fn create_function_debug_context( } pub fn declare_local( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dbg_context: &FunctionDebugContext<'ll>, variable_name: ast::Name, variable_type: Ty<'tcx>, diff --git a/src/librustc_codegen_llvm/debuginfo/namespace.rs b/src/librustc_codegen_llvm/debuginfo/namespace.rs index 06f8a4b131b60..173a297f52eba 100644 --- a/src/librustc_codegen_llvm/debuginfo/namespace.rs +++ b/src/librustc_codegen_llvm/debuginfo/namespace.rs @@ -20,18 +20,19 @@ use llvm::debuginfo::DIScope; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use common::CodegenCx; +use value::Value; use rustc_data_structures::small_c_str::SmallCStr; pub fn mangled_name_of_instance<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>, ) -> ty::SymbolName { let tcx = cx.tcx; tcx.symbol_name(instance) } -pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub fn item_namespace(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll DIScope { if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) { return scope; } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 60ebcb888166f..a9da3acbd9425 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -20,13 +20,14 @@ use builder::Builder; use libc::c_uint; use syntax_pos::{Span, Pos}; +use value::Value; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). 
pub fn set_source_location( debug_context: &FunctionDebugContext<'ll>, - bx: &Builder<'_, 'll, '_>, + bx: &Builder<'_, 'll, '_, &'ll Value>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -78,7 +79,7 @@ impl InternalDebugLocation<'ll> { } } -pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDebugLocation<'ll>) { +pub fn set_debug_location(bx: &Builder<'_, 'll, '_, &'ll Value>, debug_location: InternalDebugLocation<'ll>) { let metadata_node = match debug_location { KnownLocation { scope, line, col } => { // For MSVC, set the column number to zero. diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index f5abb527e430f..bfb0d1cfa6e18 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -14,6 +14,7 @@ use common::CodegenCx; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; +use value::Value; use rustc::hir; @@ -21,7 +22,7 @@ use rustc::hir; // any caching, i.e. calling the function twice with the same type will also do // the work twice. The `qualified` parameter only affects the first level of the // type name, further levels (i.e. type parameters) are always fully qualified. -pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, t: Ty<'tcx>, qualified: bool) -> String { @@ -32,7 +33,7 @@ pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, // Pushes the name of the type as it should be stored in debuginfo on the // `output` String. See also compute_debuginfo_type_name(). 
-pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, t: Ty<'tcx>, qualified: bool, output: &mut String) { @@ -181,7 +182,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } - fn push_item_name(cx: &CodegenCx, + fn push_item_name(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId, qualified: bool, output: &mut String) { @@ -201,7 +202,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, // reconstructed for items from non-local crates. For local crates, this // would be possible but with inlining and LTO we have to use the least // common denominator - otherwise we would run into conflicts. - fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, substs: &Substs<'tcx>, output: &mut String) { if substs.types().next().is_none() { diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index 19bc4ac39d308..fecf22a3a61f3 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -19,10 +19,11 @@ use rustc::ty::DefIdTree; use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; +use value::Value; use syntax_pos::{self, Span}; -pub fn is_node_local_to_unit(cx: &CodegenCx, def_id: DefId) -> bool +pub fn is_node_local_to_unit(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the // current compilation unit (i.e. if it is *static* in the C-sense). 
The @@ -46,22 +47,22 @@ pub fn create_DIArray( } /// Return syntax_pos::Loc corresponding to the beginning of the span -pub fn span_start(cx: &CodegenCx, span: Span) -> syntax_pos::Loc { +pub fn span_start(cx: &CodegenCx<'ll, '_, &'ll Value>, span: Span) -> syntax_pos::Loc { cx.sess().source_map().lookup_char_pos(span.lo()) } #[inline] -pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx>) -> &'a CrateDebugContext<'ll, 'tcx> { +pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> &'a CrateDebugContext<'ll, 'tcx> { cx.dbg_cx.as_ref().unwrap() } #[inline] #[allow(non_snake_case)] -pub fn DIB(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { +pub fn DIB(cx: &'a CodegenCx<'ll, '_, &'ll Value>) -> &'a DIBuilder<'ll> { cx.dbg_cx.as_ref().unwrap().builder } -pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll DIScope { item_namespace(cx, cx.tcx.parent(def_id) .expect("get_namespace_for_item: missing parent?")) } diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 26969e24f0883..08c16caec21e4 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -39,7 +39,10 @@ use value::Value; /// /// If there’s a value with the same name already declared, the function will /// return its Value instead. -pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value { +pub fn declare_global( + cx: &CodegenCx<'ll, '_, &'ll Value>, + name: &str, ty: &'ll Type +) -> &'ll Value { debug!("declare_global(name={:?})", name); let namebuf = SmallCStr::new(name); unsafe { @@ -53,7 +56,7 @@ pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'l /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. 
fn declare_raw_fn( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, callconv: llvm::CallConv, ty: &'ll Type, @@ -117,7 +120,11 @@ fn declare_raw_fn( /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. -pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value { +pub fn declare_cfn( + cx: &CodegenCx<'ll, '_, &'ll Value>, + name: &str, + fn_type: &'ll Type +) -> &'ll Value { declare_raw_fn(cx, name, llvm::CCallConv, fn_type) } @@ -127,7 +134,7 @@ pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> & /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. pub fn declare_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, name: &str, fn_type: Ty<'tcx>, ) -> &'ll Value { @@ -159,7 +166,11 @@ pub fn declare_fn( /// return None if the name already has a definition associated with it. In that /// case an error should be reported to the user, because it usually happens due /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> { +pub fn define_global( + cx: &CodegenCx<'ll, '_, &'ll Value>, + name: &str, + ty: &'ll Type +) -> Option<&'ll Value> { if get_defined_value(cx, name).is_some() { None } else { @@ -182,7 +193,7 @@ pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Val /// return panic if the name already has a definition associated with it. This /// can happen with #[no_mangle] or #[export_name], for example. 
pub fn define_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, name: &str, fn_type: Ty<'tcx>, ) -> &'ll Value { @@ -199,7 +210,7 @@ pub fn define_fn( /// return panic if the name already has a definition associated with it. This /// can happen with #[no_mangle] or #[export_name], for example. pub fn define_internal_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, name: &str, fn_type: Ty<'tcx>, ) -> &'ll Value { @@ -210,7 +221,7 @@ pub fn define_internal_fn( /// Get declared value by name. -pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { +pub fn get_declared_value(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { debug!("get_declared_value(name={:?})", name); let namebuf = SmallCStr::new(name); unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } @@ -218,7 +229,7 @@ pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Va /// Get defined or externally defined (AvailableExternally linkage) value by /// name. 
-pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { +pub fn get_defined_value(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { get_declared_value(cx, name).and_then(|val|{ let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 842bdf3cb493f..b06c220c2e1c2 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -22,7 +22,7 @@ use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>) +pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx, &'ll Value>, t: Ty<'tcx>, info: Option<&'ll Value>) -> (&'ll Value, &'ll Value) { debug!("calculate size of DST: {}; with lost info: {:?}", t, info); diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 0e108a1fd13d1..b6735e54c9944 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -36,7 +36,7 @@ use syntax_pos::Span; use std::cmp::Ordering; use std::iter; -fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { +fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { let llvm_name = match name { "sqrtf32" => "llvm.sqrt.f32", "sqrtf64" => "llvm.sqrt.f64", @@ -87,7 +87,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_codegen_llvm/context.rs pub fn codegen_intrinsic_call( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callee_ty: Ty<'tcx>, fn_ty: &FnType<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, &'ll Value>], @@ -546,7 +546,10 @@ pub fn codegen_intrinsic_call( assert_eq!(x.len(), 1); 
x.into_iter().next().unwrap() } - fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> { + fn ty_to_type( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: &intrinsics::Type + ) -> Vec<&'ll Type> { use intrinsics::Type::*; match *t { Void => vec![Type::void(cx)], @@ -589,7 +592,7 @@ pub fn codegen_intrinsic_call( // arguments to be truncated as needed and pointers to be // cast. fn modify_as_needed( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, t: &intrinsics::Type, arg: &OperandRef<'tcx, &'ll Value>, ) -> Vec<&'ll Value> { @@ -679,7 +682,7 @@ pub fn codegen_intrinsic_call( } fn copy_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, allow_overlap: bool, volatile: bool, ty: Ty<'tcx>, @@ -715,7 +718,7 @@ fn copy_intrinsic( } fn memset_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, @@ -731,8 +734,8 @@ fn memset_intrinsic( } fn try_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, @@ -757,8 +760,8 @@ fn try_intrinsic( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. fn codegen_msvc_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, @@ -866,8 +869,8 @@ fn codegen_msvc_try( // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. 
fn codegen_gnu_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, @@ -927,11 +930,11 @@ fn codegen_gnu_try( // Helper function to give a Block to a closure to codegen a shim function. // This is currently primarily used for the `try` intrinsic functions above. fn gen_fn<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, name: &str, inputs: Vec>, output: Ty<'tcx>, - codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), + codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx, &'ll Value>), ) -> &'ll Value { let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig( inputs.into_iter(), @@ -952,8 +955,8 @@ fn gen_fn<'ll, 'tcx>( // // This function is only generated once and is then cached. fn get_rust_try_fn<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx, &'ll Value>), ) -> &'ll Value { if let Some(llfn) = cx.rust_try_fn.get() { return llfn; @@ -980,7 +983,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, name: &str, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, &'ll Value>], @@ -1156,7 +1159,7 @@ fn generic_simd_intrinsic( in_elem: &::rustc::ty::TyS, in_ty: &::rustc::ty::TyS, in_len: usize, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, span: Span, args: &[OperandRef<'tcx, &'ll Value>], ) -> Result<&'ll Value, ()> { @@ -1274,7 +1277,7 @@ fn generic_simd_intrinsic( } } - fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize, + fn llvm_vector_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, elem_ty: ty::Ty, vec_len: usize, mut no_pointers: usize) -> &'ll Type { // FIXME: use 
cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { @@ -1755,7 +1758,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. -fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> { +fn int_type_width_signed(ty: Ty, cx: &CodegenCx<'ll, '_, &'ll Value>) -> Option<(u64, bool)> { match ty.sty { ty::Int(t) => Some((match t { ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 29c2e71960c2c..9c964ef30718f 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -33,7 +33,7 @@ impl<'a, 'tcx> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>, + pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llvtable: &'ll Value, fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { // Load the data pointer from the object. @@ -48,7 +48,7 @@ impl<'a, 'tcx> VirtualIndex { ptr } - pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value { + pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llvtable: &'ll Value) -> &'ll Value { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); @@ -70,7 +70,7 @@ impl<'a, 'tcx> VirtualIndex { /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. 
pub fn get_vtable( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, ) -> &'ll Value { diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs index a0d6cc4629589..7cd360877c38c 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -21,8 +21,9 @@ use rustc::ty; use rustc::ty::layout::LayoutOf; use type_of::LayoutLlvmExt; use super::FunctionCx; +use value::Value; -pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { +pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet { let mir = fx.mir; let mut analyzer = LocalAnalyzer::new(fx); @@ -51,8 +52,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { - fx: &'mir FunctionCx<'a, 'll, 'tcx>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> { + fx: &'mir FunctionCx<'a, 'll, 'tcx, V>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -60,8 +61,8 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { first_assignment: IndexVec } -impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx>) -> Self { +impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { + fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -102,7 +103,7 @@ impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { } } -impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { +impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { fn visit_assign(&mut self, block: mir::BasicBlock, place: &mir::Place<'tcx>, diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs 
index 570eb40ee68f0..708ca8ffba267 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -34,7 +34,7 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { pub fn codegen_block(&mut self, bb: mir::BasicBlock) { let mut bx = self.build_block(bb); let data = &self.mir[bb]; @@ -49,7 +49,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } fn codegen_terminator(&mut self, - mut bx: Builder<'a, 'll, 'tcx>, + mut bx: Builder<'a, 'll, 'tcx, &'ll Value>, bb: mir::BasicBlock, terminator: &mir::Terminator<'tcx>) { @@ -98,7 +98,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } }; - let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| { + let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_, &'ll Value>, target: mir::BasicBlock| { let (lltarget, is_cleanupret) = lltarget(this, target); if is_cleanupret { // micro-optimization: generate a `ret` rather than a jump @@ -111,7 +111,7 @@ impl FunctionCx<'a, 'll, 'tcx> { let do_call = | this: &mut Self, - bx: Builder<'a, 'll, 'tcx>, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, fn_ty: FnType<'tcx, Ty<'tcx>>, fn_ptr: &'ll Value, llargs: &[&'ll Value], @@ -691,7 +691,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } fn codegen_argument(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, op: OperandRef<'tcx, &'ll Value>, llargs: &mut Vec<&'ll Value>, arg: &ArgType<'tcx, Ty<'tcx>>) { @@ -779,7 +779,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } fn codegen_arguments_untupled(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, operand: &mir::Operand<'tcx>, llargs: &mut Vec<&'ll Value>, args: &[ArgType<'tcx, Ty<'tcx>>]) { @@ -803,7 +803,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> { + fn 
get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> PlaceRef<'tcx, &'ll Value> { let cx = bx.cx; if let Some(slot) = self.personality_slot { slot @@ -866,17 +866,17 @@ impl FunctionCx<'a, 'll, 'tcx> { }) } - pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> { + pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx, &'ll Value> { Builder::new_block(self.cx, self.llfn, name) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> { + pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx, &'ll Value> { let bx = Builder::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx } - fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, + fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) -> ReturnDest<'tcx, &'ll Value> { @@ -933,7 +933,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, + fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, src: &mir::Operand<'tcx>, dst: &mir::Place<'tcx>) { if let mir::Place::Local(index) = *dst { @@ -961,7 +961,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, + fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, src: &mir::Operand<'tcx>, dst: PlaceRef<'tcx, &'ll Value>) { let src = self.codegen_operand(bx, src); @@ -974,7 +974,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // Stores the return value of a function call into it's final location. 
fn store_return(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: ReturnDest<'tcx, &'ll Value>, ret_ty: &ArgType<'tcx, Ty<'tcx>>, llval: &'ll Value) { diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 9f0f744389089..df7b4ec1a9861 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -32,7 +32,7 @@ use super::super::callee; use super::FunctionCx; pub fn scalar_to_llvm( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, cv: Scalar, layout: &layout::Scalar, llty: &'ll Type, @@ -86,7 +86,7 @@ pub fn scalar_to_llvm( } } -pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocation) -> &'ll Value { let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); let layout = cx.data_layout(); let pointer_size = layout.pointer_size.bytes() as usize; @@ -122,7 +122,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll } pub fn codegen_static_initializer( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, def_id: DefId, ) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { let instance = ty::Instance::mono(cx.tcx, def_id); @@ -140,10 +140,10 @@ pub fn codegen_static_initializer( Ok((const_alloc_to_llvm(cx, alloc), alloc)) } -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { fn fully_evaluate( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, constant: &'tcx ty::Const<'tcx>, ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { match constant.val { @@ -163,7 +163,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn eval_mir_constant( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, constant: &mir::Constant<'tcx>, ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { let c = 
self.monomorphize(&constant.literal); @@ -173,7 +173,7 @@ impl FunctionCx<'a, 'll, 'tcx> { /// process constant containing SIMD shuffle indices pub fn simd_shuffle_indices( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, span: Span, ty: Ty<'tcx>, constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index a7208b416a62e..fa1380854d932 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -43,16 +43,16 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { +pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, debug_context: FunctionDebugContext<'ll>, - llfn: &'ll Value, + llfn: V, - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, fn_ty: FnType<'tcx, Ty<'tcx>>, @@ -63,7 +63,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` blocks: IndexVec, @@ -72,7 +72,8 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { cleanup_kinds: IndexVec, /// When targeting MSVC, this stores the cleanup info for each funclet - /// BB. This is initialized as we compute the funclets' head block in RPO. + /// BB. This is initialized as we compute the funclets' + /// head block in RPO. 
funclets: &'a IndexVec>>, /// This stores the landing-pad block for a given BB, computed lazily on GNU @@ -97,7 +98,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. scopes: IndexVec>, @@ -106,7 +107,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { param_substs: &'tcx Substs<'tcx>, } -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { @@ -117,7 +118,7 @@ impl FunctionCx<'a, 'll, 'tcx> { ) } - pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_>, source_info: mir::SourceInfo) { + pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_, &'ll Value>, source_info: mir::SourceInfo) { let (scope, span) = self.debug_loc(source_info); debuginfo::set_source_location(&self.debug_context, bx, scope, span); } @@ -189,7 +190,10 @@ enum LocalRef<'tcx, V> { } impl LocalRef<'tcx, &'ll Value> { - fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx, &'ll Value> { + fn new_operand( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + layout: TyLayout<'tcx> + ) -> LocalRef<'tcx, &'ll Value> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -204,7 +208,7 @@ impl LocalRef<'tcx, &'ll Value> { /////////////////////////////////////////////////////////////////////////// pub fn codegen_mir( - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, @@ -364,7 +368,7 @@ pub fn codegen_mir( fn create_funclets( mir: &'a Mir<'tcx>, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, cleanup_kinds: &IndexVec, block_bxs: &IndexVec) -> (IndexVec>, @@ -432,8 +436,8 @@ fn create_funclets( /// argument's value. 
As arguments are places, these are always /// indirect. fn arg_local_refs( - bx: &Builder<'a, 'll, 'tcx>, - fx: &FunctionCx<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>, scopes: &IndexVec>, memory_locals: &BitSet, ) -> Vec> { diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 176ba8b66ba7e..ab67a35895cef 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -17,7 +17,7 @@ use rustc_data_structures::sync::Lrc; use base; use common::{CodegenCx, C_undef, C_usize}; use builder::{Builder, MemFlags}; -use value::Value; +use value::{Value, ValueTrait}; use type_of::LayoutLlvmExt; use type_::Type; use glue; @@ -61,14 +61,14 @@ pub struct OperandRef<'tcx, V> { pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'tcx, &'ll Value> { +impl fmt::Debug for OperandRef<'tcx, &'ll Value> where Value : ValueTrait { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } impl OperandRef<'tcx, &'ll Value> { - pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, + pub fn new_zst(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { assert!(layout.is_zst()); OperandRef { @@ -77,7 +77,7 @@ impl OperandRef<'tcx, &'ll Value> { } } - pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, + pub fn from_const(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, val: &'tcx ty::Const<'tcx>) -> Result, Lrc>> { let layout = bx.cx.layout_of(val.ty); @@ -141,7 +141,7 @@ impl OperandRef<'tcx, &'ll Value> { } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> { + pub fn deref(self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> PlaceRef<'tcx, &'ll Value> { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -160,7 +160,7 @@ impl 
OperandRef<'tcx, &'ll Value> { /// If this operand is a `Pair`, we return an aggregate with the two values. /// For other cases, see `immediate`. - pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value { + pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> &'ll Value { if let OperandValue::Pair(a, b) = self.val { let llty = self.layout.llvm_type(bx.cx); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", @@ -176,7 +176,7 @@ impl OperandRef<'tcx, &'ll Value> { } /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. - pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, + pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llval: &'ll Value, layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { @@ -194,7 +194,10 @@ impl OperandRef<'tcx, &'ll Value> { OperandRef { val, layout } } - pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'tcx, &'ll Value> { + pub fn extract_field( + &self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + i: usize + ) -> OperandRef<'tcx, &'ll Value> { let field = self.layout.field(bx.cx, i); let offset = self.layout.fields.offset(i); @@ -253,25 +256,29 @@ impl OperandRef<'tcx, &'ll Value> { } impl OperandValue<&'ll Value> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { + pub fn store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { + pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { + pub fn unaligned_volatile_store( + self, + bx: &Builder<'a, 'll, 'tcx, 
&'ll Value>, + dest: PlaceRef<'tcx, &'ll Value> + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) { + pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } fn store_with_flags( self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>, flags: MemFlags, ) { @@ -333,9 +340,9 @@ impl OperandValue<&'ll Value> { } } -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { fn maybe_codegen_consume_direct(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, place: &mir::Place<'tcx>) -> Option> { @@ -383,7 +390,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn codegen_consume(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, place: &mir::Place<'tcx>) -> OperandRef<'tcx, &'ll Value> { @@ -407,7 +414,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn codegen_operand(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx, &'ll Value> { diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 603a5de4f6486..701e10ab79824 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -57,7 +57,7 @@ impl PlaceRef<'tcx, &'ll Value> { } pub fn from_const_alloc( - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, layout: TyLayout<'tcx>, alloc: &mir::interpret::Allocation, offset: Size, @@ -74,7 +74,7 @@ impl PlaceRef<'tcx, &'ll Value> { PlaceRef::new_sized(llval, layout, alloc.align) } - pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) + pub fn alloca(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, layout: 
TyLayout<'tcx>, name: &str) -> PlaceRef<'tcx, &'ll Value> { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); @@ -83,8 +83,11 @@ impl PlaceRef<'tcx, &'ll Value> { } /// Returns a place for an indirect reference to an unsized place. - pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + pub fn alloca_unsized_indirect( + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + layout: TyLayout<'tcx>, + name: &str + ) -> PlaceRef<'tcx, &'ll Value> { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); @@ -92,7 +95,7 @@ impl PlaceRef<'tcx, &'ll Value> { Self::alloca(bx, ptr_layout, name) } - pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value { + pub fn len(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Value { if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); @@ -105,7 +108,7 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> { + pub fn load(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> { debug!("PlaceRef::load: {:?}", self); assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); @@ -169,7 +172,10 @@ impl PlaceRef<'tcx, &'ll Value> { } /// Access a field, at a point when the value's case is known. 
- pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'tcx, &'ll Value> { + pub fn project_field( + self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + ix: usize + ) -> PlaceRef<'tcx, &'ll Value> { let cx = bx.cx; let field = self.layout.field(cx, ix); let offset = self.layout.fields.offset(ix); @@ -273,7 +279,7 @@ impl PlaceRef<'tcx, &'ll Value> { } /// Obtain the actual discriminant of a value. - pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value { + pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, cast_to: Ty<'tcx>) -> &'ll Value { let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); if self.layout.abi.is_uninhabited() { return C_undef(cast_to); @@ -337,7 +343,7 @@ impl PlaceRef<'tcx, &'ll Value> { /// Set the discriminant for a new value of the given case of the given /// representation. - pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) { + pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) { if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() { return; } @@ -391,7 +397,7 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) + pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) -> PlaceRef<'tcx, &'ll Value> { PlaceRef { llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), @@ -401,7 +407,7 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) + pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) -> PlaceRef<'tcx, &'ll Value> { let mut downcast = *self; downcast.layout = self.layout.for_variant(bx.cx, variant_index); @@ -413,18 +419,18 @@ impl PlaceRef<'tcx, &'ll Value> { downcast } - pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) 
{ + pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) { bx.lifetime_end(self.llval, self.layout.size); } } -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { pub fn codegen_place(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, place: &mir::Place<'tcx>) -> PlaceRef<'tcx, &'ll Value> { debug!("codegen_place(place={:?})", place); diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index b6debd961c71f..d1f4e5d4f6f55 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -32,12 +32,12 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { pub fn codegen_rvalue(&mut self, - bx: Builder<'a, 'll, 'tcx>, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>, rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> + -> Builder<'a, 'll, 'tcx, &'ll Value> { debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue); @@ -199,9 +199,9 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn codegen_rvalue_operand(&mut self, - bx: Builder<'a, 'll, 'tcx>, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx, &'ll Value>) + -> (Builder<'a, 'll, 'tcx, &'ll Value>, OperandRef<'tcx, &'ll Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); @@ -538,7 +538,7 @@ impl FunctionCx<'a, 'll, 'tcx> { fn evaluate_array_len( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, place: &mir::Place<'tcx>, ) -> &'ll Value { // ZST are passed as 
operands and require special handling @@ -558,7 +558,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_scalar_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, op: mir::BinOp, lhs: &'ll Value, rhs: &'ll Value, @@ -626,7 +626,7 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_fat_ptr_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, op: mir::BinOp, lhs_addr: &'ll Value, lhs_extra: &'ll Value, @@ -673,7 +673,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn codegen_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, op: mir::BinOp, lhs: &'ll Value, rhs: &'ll Value, @@ -750,7 +750,7 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value { +fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_, &'ll Value>, ty: Ty) -> &'ll Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -815,7 +815,7 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> bx.cx.get_intrinsic(&name) } -fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, +fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, signed: bool, x: &'ll Value, int_ty: &'ll Type, @@ -845,7 +845,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, } } -fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, +fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, signed: bool, x: &'ll Value, float_ty: &'ll Type, diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs index 93be0074f6e9b..4e39fda1cce9c 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -16,12 +16,13 @@ use builder::Builder; use super::FunctionCx; use super::LocalRef; use super::OperandValue; +use value::Value; -impl FunctionCx<'a, 'll, 'tcx> { +impl FunctionCx<'a, 'll, 
'tcx, &'ll Value> { pub fn codegen_statement(&mut self, - bx: Builder<'a, 'll, 'tcx>, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, statement: &mir::Statement<'tcx>) - -> Builder<'a, 'll, 'tcx> { + -> Builder<'a, 'll, 'tcx, &'ll Value> { debug!("codegen_statement(statement={:?})", statement); self.set_debug_loc(&bx, statement.source_info); diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index dab9b147cc070..fa83b22f3bd7f 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -30,13 +30,14 @@ use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; use std::fmt; +use value::Value; pub use rustc::mir::mono::MonoItem; pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { - fn define(&self, cx: &CodegenCx<'a, 'tcx>) { + fn define(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) { debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", self.to_string(cx.tcx), self.to_raw_string(), @@ -76,7 +77,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { } fn predefine(&self, - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, linkage: Linkage, visibility: Visibility) { debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", @@ -123,7 +124,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} -fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, def_id: DefId, linkage: Linkage, visibility: Visibility, @@ -145,7 +146,7 @@ fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, cx.instances.borrow_mut().insert(instance, g); } -fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>, linkage: Linkage, 
visibility: Visibility, diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 51a233d791625..10e0361ee1bcb 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,6 +16,7 @@ use llvm; use llvm::{Bool, False, True, TypeKind}; use context::CodegenCx; +use value::Value; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; @@ -40,25 +41,25 @@ impl fmt::Debug for Type { } impl Type { - pub fn void(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn void(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(cx.llcx) } } - pub fn metadata(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn metadata(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMRustMetadataTypeInContext(cx.llcx) } } - pub fn i1(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i1(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(cx.llcx) } } - pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i8(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt8TypeInContext(cx.llcx) } @@ -70,32 +71,32 @@ impl Type { } } - pub fn i16(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i16(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt16TypeInContext(cx.llcx) } } - pub fn i32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i32(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt32TypeInContext(cx.llcx) } } - pub fn i64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i64(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt64TypeInContext(cx.llcx) } } - pub fn i128(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i128(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, 128) } } // Creates an integer type with the given number of bits, e.g. 
i24 - pub fn ix(cx: &CodegenCx<'ll, '_>, num_bits: u64) -> &'ll Type { + pub fn ix(cx: &CodegenCx<'ll, '_, &'ll Value>, num_bits: u64) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) } @@ -108,27 +109,27 @@ impl Type { } } - pub fn f32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn f32(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMFloatTypeInContext(cx.llcx) } } - pub fn f64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn f64(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMDoubleTypeInContext(cx.llcx) } } - pub fn bool(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn bool(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { Type::i8(cx) } - pub fn char(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn char(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { Type::i32(cx) } - pub fn i8p(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn i8p(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { Type::i8(cx).ptr_to() } @@ -136,11 +137,11 @@ impl Type { Type::i8_llcx(llcx).ptr_to() } - pub fn isize(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn isize(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { cx.isize_ty } - pub fn c_int(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn c_int(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { match &cx.tcx.sess.target.target.target_c_int_width[..] 
{ "16" => Type::i16(cx), "32" => Type::i32(cx), @@ -149,7 +150,7 @@ impl Type { } } - pub fn int_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::IntTy) -> &'ll Type { + pub fn int_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::IntTy) -> &'ll Type { match t { ast::IntTy::Isize => cx.isize_ty, ast::IntTy::I8 => Type::i8(cx), @@ -160,7 +161,7 @@ impl Type { } } - pub fn uint_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::UintTy) -> &'ll Type { + pub fn uint_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::UintTy) -> &'ll Type { match t { ast::UintTy::Usize => cx.isize_ty, ast::UintTy::U8 => Type::i8(cx), @@ -171,7 +172,7 @@ impl Type { } } - pub fn float_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::FloatTy) -> &'ll Type { + pub fn float_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::FloatTy) -> &'ll Type { match t { ast::FloatTy::F32 => Type::f32(cx), ast::FloatTy::F64 => Type::f64(cx), @@ -192,7 +193,11 @@ impl Type { } } - pub fn struct_(cx: &CodegenCx<'ll, '_>, els: &[&'ll Type], packed: bool) -> &'ll Type { + pub fn struct_( + cx: &CodegenCx<'ll, '_, &'ll Value>, + els: &[&'ll Type], + packed: bool + ) -> &'ll Type { unsafe { llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), els.len() as c_uint, @@ -200,7 +205,7 @@ impl Type { } } - pub fn named_struct(cx: &CodegenCx<'ll, '_>, name: &str) -> &'ll Type { + pub fn named_struct(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> &'ll Type { let name = SmallCStr::new(name); unsafe { llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) @@ -279,7 +284,7 @@ impl Type { } } - pub fn from_integer(cx: &CodegenCx<'ll, '_>, i: layout::Integer) -> &'ll Type { + pub fn from_integer(cx: &CodegenCx<'ll, '_, &'ll Value>, i: layout::Integer) -> &'ll Type { use rustc::ty::layout::Integer::*; match i { I8 => Type::i8(cx), @@ -292,7 +297,7 @@ impl Type { /// Return a LLVM type that has at most the required alignment, /// as a conservative approximation for unknown pointee types. 
- pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_>, align: Align) -> &'ll Type { + pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_, &'ll Value>, align: Align) -> &'ll Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = layout::Integer::approximate_abi_align(cx, align); Type::from_integer(cx, ity) @@ -300,7 +305,11 @@ impl Type { /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. - pub fn padding_filler(cx: &CodegenCx<'ll, '_>, size: Size, align: Align) -> &'ll Type { + pub fn padding_filler( + cx: &CodegenCx<'ll, '_, &'ll Value>, + size: Size, + align: Align + ) -> &'ll Type { let unit = layout::Integer::approximate_abi_align(cx, align); let size = size.bytes(); let unit_size = unit.size().bytes(); @@ -308,7 +317,7 @@ impl Type { Type::array(Type::from_integer(cx, unit), size / unit_size) } - pub fn x86_mmx(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn x86_mmx(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMX86MMXTypeInContext(cx.llcx) } diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 03ded64e64235..e798f4e73f7f7 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -16,10 +16,11 @@ use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; +use value::Value; use std::fmt::Write; -fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, layout: TyLayout<'tcx>, defer: &mut Option<(&'a Type, TyLayout<'tcx>)>) -> &'a Type { @@ -111,7 +112,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } -fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, layout: TyLayout<'tcx>) -> 
(Vec<&'a Type>, bool) { debug!("struct_llfields: {:#?}", layout); @@ -163,7 +164,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, (result, packed) } -impl<'a, 'tcx> CodegenCx<'a, 'tcx> { +impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { pub fn align_of(&self, ty: Ty<'tcx>) -> Align { self.layout_of(ty).align } @@ -202,14 +203,14 @@ pub struct PointeeInfo { pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn is_llvm_scalar_pair<'a>(&self) -> bool; - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type; + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type; + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type; - fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, index: usize, immediate: bool) -> &'a Type; fn llvm_field_index(&self, index: usize) -> u64; - fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) + fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, offset: Size) -> Option; } @@ -245,7 +246,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). 
@@ -313,7 +314,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { llty } - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { return Type::i1(cx); @@ -322,7 +323,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { self.llvm_type(cx) } - fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { layout::Int(i, _) => Type::from_integer(cx, i), @@ -340,7 +341,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, index: usize, immediate: bool) -> &'a Type { // HACK(eddyb) special-case fat pointers until LLVM removes // pointee types, to avoid bitcasting every `OperandRef::deref`. 
@@ -403,7 +404,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) + fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, offset: Size) -> Option { if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { return pointee; diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index 4bf5b09baa629..a17500cc87d68 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -15,12 +15,16 @@ use llvm; use std::fmt; use std::hash::{Hash, Hasher}; +pub trait ValueTrait : fmt::Debug {} + impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ } } +impl ValueTrait for Value {} + impl Eq for Value {} impl Hash for Value { From bc2897e81504d3ddd7b833e0e6acc9ca04aeb252 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 7 Aug 2018 17:14:40 +0200 Subject: [PATCH 03/76] Generalized base.rs#call_memcpy and everything that it uses Generalized operand.rs#nontemporal_store and fixed tidy issues Generalized operand.rs#nontemporal_store's implem even more With a BuilderMethod trait implemented by Builder for LLVM Cleaned builder.rs : no more code duplication, no more ValueTrait Full traitification of builder.rs --- .atom-build.yml | 1 + src/librustc_codegen_llvm/abi.rs | 24 +- src/librustc_codegen_llvm/asm.rs | 1 + src/librustc_codegen_llvm/attributes.rs | 2 +- src/librustc_codegen_llvm/back/write.rs | 3 +- src/librustc_codegen_llvm/base.rs | 38 +- src/librustc_codegen_llvm/builder.rs | 373 +++++++++--------- src/librustc_codegen_llvm/common.rs | 34 +- src/librustc_codegen_llvm/context.rs | 45 ++- src/librustc_codegen_llvm/debuginfo/gdb.rs | 3 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 1 + .../debuginfo/source_loc.rs | 6 +- src/librustc_codegen_llvm/declare.rs | 14 +- src/librustc_codegen_llvm/glue.rs | 8 +- src/librustc_codegen_llvm/intrinsic.rs | 30 +- 
src/librustc_codegen_llvm/lib.rs | 2 + src/librustc_codegen_llvm/meth.rs | 8 +- src/librustc_codegen_llvm/mir/block.rs | 28 +- src/librustc_codegen_llvm/mir/constant.rs | 1 + src/librustc_codegen_llvm/mir/mod.rs | 7 +- src/librustc_codegen_llvm/mir/operand.rs | 37 +- src/librustc_codegen_llvm/mir/place.rs | 8 +- src/librustc_codegen_llvm/mir/rvalue.rs | 24 +- src/librustc_codegen_llvm/traits.rs | 275 +++++++++++++ src/librustc_codegen_llvm/type_.rs | 113 ++++-- src/librustc_codegen_llvm/type_of.rs | 4 +- src/librustc_codegen_llvm/value.rs | 14 +- 27 files changed, 783 insertions(+), 321 deletions(-) create mode 100644 .atom-build.yml create mode 100644 src/librustc_codegen_llvm/traits.rs diff --git a/.atom-build.yml b/.atom-build.yml new file mode 100644 index 0000000000000..a31bc877c9f5f --- /dev/null +++ b/.atom-build.yml @@ -0,0 +1 @@ +cmd: ./x.py -i check diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 90f748fdec036..4f1ac352500fe 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -19,6 +19,8 @@ use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; +use traits::BuilderMethods; + use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; use rustc::ty::layout; @@ -119,7 +121,7 @@ impl LlvmType for Reg { } } RegKind::Vector => { - Type::vector(Type::i8(cx), self.size.bytes()) + Type::vector::(Type::i8(cx), self.size.bytes()) } } } @@ -143,7 +145,7 @@ impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array(rest_ll_unit, rest_count); + return Type::array::(rest_ll_unit, rest_count); } } @@ -167,7 +169,12 @@ impl LlvmType for CastTarget { pub trait ArgTypeExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>); + fn store( + 
&self, + bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ); fn store_fn_arg( &self, bx: &Builder<'_, 'll, 'tcx, &'ll Value>, @@ -186,7 +193,12 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>) { + fn store( + &self, + bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { if self.is_ignore() { return; } @@ -633,9 +645,9 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } if self.variadic { - Type::variadic_func(&llargument_tys, llreturn_ty) + Type::variadic_func::(&llargument_tys, llreturn_ty) } else { - Type::func(&llargument_tys, llreturn_ty) + Type::func::(&llargument_tys, llreturn_ty) } } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index f9944f6956857..e61dd0538ccc0 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -16,6 +16,7 @@ use builder::Builder; use value::Value; use rustc::hir; +use traits::BuilderMethods; use mir::place::PlaceRef; use mir::operand::OperandValue; diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index 14f37d7584002..fdd252b92cc16 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -127,7 +127,7 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator { .filter(|l| !l.is_empty()) } -pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { let cpu = llvm_util::target_cpu(cx.tcx.sess); let target_cpu = CString::new(cpu).unwrap(); 
llvm::AddFunctionAttrStringValue( diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 81619c219757b..b92ee8c484e74 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -49,6 +49,7 @@ use context::{is_pie_binary, get_reloc_model}; use common::{C_bytes_in_context, val_ty}; use jobserver::{Client, Acquired}; use rustc_demangle; +use value::Value; use std::any::Any; use std::ffi::{CString, CStr}; @@ -2542,7 +2543,7 @@ fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::M "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(llcx); + let i8p_ty = Type::i8p_llcx::(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage && diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index c1ae0586a308b..7d4144eb543a7 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -75,6 +75,8 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; +use traits::BuilderMethods; + use std::any::Any; use std::ffi::CString; use std::sync::Arc; @@ -87,7 +89,7 @@ use syntax_pos::symbol::InternedString; use syntax::attr; use rustc::hir::{self, CodegenFnAttrs}; -use value::Value; +use value::{Value, ValueTrait}; use mir::operand::OperandValue; @@ -388,9 +390,14 @@ pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { bx.call(assume_intrinsic, &[val], None); } -pub fn from_immediate(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) -> &'ll Value { - if val_ty(val) == Type::i1(bx.cx) { - bx.zext(val, Type::i8(bx.cx)) +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, + Value : ?Sized, + Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + bx: &Builder, + val: &'ll Value +) -> &'ll Value where Value : ValueTrait { + if val_ty(val) == Type::i1(bx.cx()) { + bx.zext(val, 
Type::i8(bx.cx())) } else { val } @@ -418,14 +425,16 @@ pub fn to_immediate_scalar( val } -pub fn call_memcpy( - bx: &Builder<'_, 'll, '_, &'ll Value>, +pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, + Value : ?Sized, + Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + bx: &Builder, dst: &'ll Value, src: &'ll Value, n_bytes: &'ll Value, align: Align, flags: MemFlags, -) { +) where Value : ValueTrait { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let val = bx.load(src, align); @@ -433,7 +442,7 @@ pub fn call_memcpy( bx.store_with_flags(val, ptr, align, flags); return; } - let cx = bx.cx; + let cx = bx.cx(); let ptr_width = &cx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = cx.get_intrinsic(&key); @@ -445,20 +454,22 @@ pub fn call_memcpy( bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty( - bx: &Builder<'_, 'll, 'tcx, &'ll Value>, +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, + Value : ?Sized, + Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + bx: &Builder, dst: &'ll Value, src: &'ll Value, layout: TyLayout<'tcx>, align: Align, flags: MemFlags, -) { +) where Value : ValueTrait { let size = layout.size.bytes(); if size == 0 { return; } - call_memcpy(bx, dst, src, C_usize(bx.cx, size), align, flags); + call_memcpy(bx, dst, src, C_usize(bx.cx(), size), align, flags); } pub fn call_memset( @@ -549,7 +560,8 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { rust_main_def_id: DefId, use_start_lang_item: bool, ) { - let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); + let llfty = + Type::func::(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); // Given that `main()` has no arguments, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 
9e929d5b722fc..a1dcd049bc145 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,6 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; +use traits::BuilderMethods; use std::borrow::Cow; use std::ops::Range; @@ -54,8 +55,8 @@ bitflags! { } } -impl Builder<'a, 'll, 'tcx, &'ll Value> { - pub fn new_block<'b>( +impl BuilderMethods<'a, 'll, 'tcx, Value> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, name: &'b str @@ -73,7 +74,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { bx } - pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { // Create a fresh builder from the crate context. let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -84,84 +85,84 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx, &'ll Value> { + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { Builder::new_block(self.cx, self.llfn(), name) } - pub fn sess(&self) -> &Session { + fn sess(&self) -> &Session { self.cx.sess() } - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.cx.tcx } - pub fn llfn(&self) -> &'ll Value { + fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - pub fn llbb(&self) -> &'ll BasicBlock { + fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } fn count_insn(&self, category: &str) { - if self.cx.sess().codegen_stats() { - self.cx.stats.borrow_mut().n_llvm_insns += 1; + if self.cx().sess().codegen_stats() { + self.cx().stats.borrow_mut().n_llvm_insns += 1; } - if self.cx.sess().count_llvm_insns() { - *self.cx.stats - .borrow_mut() - .llvm_insns - 
.entry(category.to_string()) - .or_insert(0) += 1; + if self.cx().sess().count_llvm_insns() { + *self.cx().stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; } } - pub fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - pub fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - pub fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - pub fn ret_void(&self) { + fn ret_void(&self) { self.count_insn("retvoid"); unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } } - pub fn ret(&self, v: &'ll Value) { + fn ret(&self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - pub fn br(&self, dest: &'ll BasicBlock) { + fn br(&self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); } } - pub fn cond_br( + fn cond_br( &self, cond: &'ll Value, then_llbb: &'ll BasicBlock, @@ -173,7 +174,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn switch( + fn switch( &self, v: &'ll Value, else_llbb: &'ll BasicBlock, @@ -184,7 +185,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn invoke(&self, + fn invoke(&self, llfn: &'ll Value, args: &[&'ll Value], then: &'ll BasicBlock, @@ -211,7 +212,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn unreachable(&self) { + fn unreachable(&self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); @@ -219,21 +220,21 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } /* Arithmetic */ - pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&self, 
lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -242,21 +243,21 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -265,21 +266,21 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll 
Value { + fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -289,42 +290,42 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } - pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -333,28 +334,28 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } 
- pub fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -363,70 +364,70 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + 
fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - pub fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - pub fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - pub fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - pub fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { let bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -434,7 +435,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { bx.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -449,7 +450,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn array_alloca(&self, + fn array_alloca(&self, ty: &'ll Type, len: &'ll Value, name: &str, @@ -468,7 +469,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -477,7 +478,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); 
unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -486,7 +487,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { + fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { self.count_insn("load.atomic"); unsafe { let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); @@ -499,7 +500,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } - pub fn range_metadata(&self, load: &'ll Value, range: Range) { + fn range_metadata(&self, load: &'ll Value, range: Range) { if self.sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, @@ -522,18 +523,31 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - pub fn store_with_flags( + fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, + order: AtomicOrdering, align: Align) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // Also see `atomic_load` for more context. 
+ llvm::LLVMSetAlignment(store, align.pref() as c_uint); + } + } + + fn store_with_flags( &self, val: &'ll Value, ptr: &'ll Value, @@ -567,20 +581,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: AtomicOrdering, align: Align) { - debug!("Store {:?} -> {:?}", val, ptr); - self.count_insn("store.atomic"); - let ptr = self.check_store(val, ptr); - unsafe { - let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); - // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? - // Also see `atomic_load` for more context. - llvm::LLVMSetAlignment(store, align.pref() as c_uint); - } - } - - pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -588,7 +589,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -596,122 +597,108 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); - unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) - } - } - /* Casts */ - pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - pub fn 
sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - pub fn 
inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } - pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); + + fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + self.count_insn("intcast"); unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - pub fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { - self.count_insn("intcast"); + fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); unsafe { - llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) } } /* Comparisons */ - pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -719,14 +706,14 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } /* Miscellaneous instructions */ - pub fn empty_phi(&self, ty: &'ll Type) 
-> &'ll Value { + fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -738,10 +725,10 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> Option<&'ll Value> { + fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, + inputs: &[&'ll Value], output: &'ll Type, + volatile: bool, alignstack: bool, + dia: AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -755,7 +742,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func(&argtys[..], output); + let fty = Type::func::(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. 
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); @@ -771,31 +758,14 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, bundle, noname()) - } - } - - pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -803,7 +773,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn select( + fn select( &self, cond: &'ll Value, then_val: &'ll Value, else_val: &'ll Value, @@ -815,21 +785,21 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } #[allow(dead_code)] - pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) } } - pub fn insert_element( + fn insert_element( &self, vec: &'ll Value, elt: &'ll Value, idx: &'ll Value, @@ -840,24 
+810,24 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(Type::vector::(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); + let vec_i32_ty = Type::vector::(Type::i32(self.cx), num_elts as u64); self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) } } - pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -869,7 +839,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { instr } } - pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -881,56 +851,56 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { instr } } - pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") } } - pub fn 
vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") } } - pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") } } - pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") } } - pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") } } - pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") } } - pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMax is not 
available in LLVM version < 5.0") } } - pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) @@ -939,7 +909,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { instr } } - pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) @@ -948,14 +918,14 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { instr } } - pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") } } - pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); @@ -963,7 +933,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -971,7 +941,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as 
u64, idx); @@ -981,7 +951,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -990,27 +960,27 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - pub fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - pub fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } - pub fn cleanup_pad(&self, + fn cleanup_pad(&self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> &'ll Value { self.count_insn("cleanuppad"); @@ -1025,7 +995,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { ret.expect("LLVM does not have support for cleanuppad") } - pub fn cleanup_ret( + fn cleanup_ret( &self, cleanup: &'ll Value, unwind: Option<&'ll BasicBlock>, ) -> &'ll Value { @@ -1036,7 +1006,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { ret.expect("LLVM does not have support for cleanupret") } - pub fn catch_pad(&self, + fn catch_pad(&self, parent: &'ll Value, args: &[&'ll Value]) -> &'ll Value { self.count_insn("catchpad"); @@ -1049,7 +1019,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { ret.expect("LLVM does not have support for catchpad") } - pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { self.count_insn("catchret"); let ret = unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) @@ -1057,7 +1027,7 @@ impl 
Builder<'a, 'll, 'tcx, &'ll Value> { ret.expect("LLVM does not have support for catchret") } - pub fn catch_switch( + fn catch_switch( &self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, @@ -1073,20 +1043,20 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { ret.expect("LLVM does not have support for catchswitch") } - pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - pub fn set_personality_fn(&self, personality: &'ll Value) { + fn set_personality_fn(&self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } } // Atomic Operations - pub fn atomic_cmpxchg( + fn atomic_cmpxchg( &self, dst: &'ll Value, cmp: &'ll Value, @@ -1100,7 +1070,7 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { order, failure_order, weak) } } - pub fn atomic_rmw( + fn atomic_rmw( &self, op: AtomicRmwBinOp, dst: &'ll Value, @@ -1112,26 +1082,26 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { } } - pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { unsafe { llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); } } - pub fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - pub fn set_invariant_load(&self, load: &'ll Value) { + fn set_invariant_load(&self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as 
c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); @@ -1201,11 +1171,11 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { Cow::Owned(casted_args) } - pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } @@ -1232,4 +1202,45 @@ impl Builder<'a, 'll, 'tcx, &'ll Value> { let ptr = self.pointercast(ptr, Type::i8p(self.cx)); self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); } + + fn call(&self, llfn: &'ll Value, args: &[&'ll Value], + bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + let bundle = bundle.map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + bundle, noname() + ) + } + } + + fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + + fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> { + &self.cx + } } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 09c3a86a437a6..1642f317d89a9 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -23,11 +23,12 @@ use consts; use declare; use type_::Type; use type_of::LayoutLlvmExt; -use value::Value; +use value::{Value, 
ValueTrait}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; +use traits::BuilderMethods; use libc::{c_uint, c_char}; use std::iter; @@ -112,9 +113,9 @@ impl Funclet<'ll> { } } -pub fn val_ty(v: &'ll Value) -> &'ll Type { +pub fn val_ty(v: &'ll Value) -> &'ll Type where Value : ValueTrait { unsafe { - llvm::LLVMTypeOf(v) + llvm::LLVMTypeOf(v.to_llvm()) } } @@ -125,21 +126,21 @@ pub fn C_null(t: &'ll Type) -> &'ll Value { } } -pub fn C_undef(t: &'ll Type) -> &'ll Value { +pub fn C_undef(t: &'ll Type) -> &'ll Value where Value : ValueTrait { unsafe { - llvm::LLVMGetUndef(t) + Value::of_llvm(llvm::LLVMGetUndef(t)) } } -pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { +pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value where Value : ValueTrait { unsafe { - llvm::LLVMConstInt(t, i as u64, True) + Value::of_llvm(llvm::LLVMConstInt(t, i as u64, True)) } } -pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { +pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value where Value : ValueTrait { unsafe { - llvm::LLVMConstInt(t, i, False) + Value::of_llvm(llvm::LLVMConstInt(t, i, False)) } } @@ -150,11 +151,17 @@ pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { } } -pub fn C_bool(cx: &CodegenCx<'ll, '_, &'ll Value>, val: bool) -> &'ll Value { +pub fn C_bool( + cx: &CodegenCx<'ll, '_, &'ll Value>, + val: bool +) -> &'ll Value where Value : ValueTrait { C_uint(Type::i1(cx), val as u64) } -pub fn C_i32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: i32) -> &'ll Value { +pub fn C_i32( + cx: &CodegenCx<'ll, '_, &'ll Value>, + i: i32 +) -> &'ll Value where Value : ValueTrait { C_int(Type::i32(cx), i as i64) } @@ -166,7 +173,10 @@ pub fn C_u64(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { C_uint(Type::i64(cx), i) } -pub fn C_usize(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { +pub fn C_usize( + cx: &CodegenCx<'ll, '_, &'ll Value>, + i: u64 +) -> &'ll Value where Value : ValueTrait { let bit_size = 
cx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 443e62718e4e6..86ef97528053f 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -18,7 +18,7 @@ use callee; use base; use declare; use monomorphize::Instance; -use value::Value; +use value::{Value, ValueTrait}; use monomorphize::partitioning::CodegenUnit; use type_::Type; @@ -283,7 +283,7 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { None }; - let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); + let isize_ty = Type::ix_llcx::(llcx, tcx.data_layout.pointer_size.bits()); CodegenCx { tcx, @@ -315,7 +315,7 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { } } -impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { +impl<'b, 'tcx, Value : ?Sized> CodegenCx<'b, 'tcx, &'b Value> where Value : ValueTrait { pub fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } @@ -327,7 +327,9 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } +} +impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { /// Generate a new symbol name with the given prefix. This symbol name must /// only be used for definitions with `internal` or `private` linkage. 
pub fn generate_local_symbol_name(&self, prefix: &str) -> String { @@ -377,7 +379,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } else { "rust_eh_personality" }; - let fty = Type::variadic_func(&[], Type::i32(self)); + let fty = Type::variadic_func::(&[], Type::i32(self)); declare::declare_cfn(self, name, fty) } }; @@ -445,7 +447,9 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } } -impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { +impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> + where Value : ValueTrait +{ fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.tcx.data_layout } @@ -478,28 +482,31 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx, &'ll Value> { } /// Declare any llvm intrinsics that you might need -fn declare_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, key: &str) -> Option<&'ll Value> { +fn declare_intrinsic( + cx: &CodegenCx<'ll, '_, &'ll Value>, + key: &str +) -> Option<&'ll Value> where Value : ValueTrait { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); + let f = declare::declare_cfn(cx, $name, Type::func::(&[], $ret)); + llvm::SetUnnamedAddr(f.to_llvm(), false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } ); ($name:expr, fn(...) 
-> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); + let f = declare::declare_cfn(cx, $name, Type::variadic_func::(&[], $ret)); + llvm::SetUnnamedAddr(f.to_llvm(), false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); + let f = declare::declare_cfn(cx, $name, Type::func::(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f.to_llvm(), false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } @@ -520,14 +527,14 @@ fn declare_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, key: &str) -> Option<& let t_f32 = Type::f32(cx); let t_f64 = Type::f64(cx); - let t_v2f32 = Type::vector(t_f32, 2); - let t_v4f32 = Type::vector(t_f32, 4); - let t_v8f32 = Type::vector(t_f32, 8); - let t_v16f32 = Type::vector(t_f32, 16); + let t_v2f32 = Type::vector::(t_f32, 2); + let t_v4f32 = Type::vector::(t_f32, 4); + let t_v8f32 = Type::vector::(t_f32, 8); + let t_v16f32 = Type::vector::(t_f32, 16); - let t_v2f64 = Type::vector(t_f64, 2); - let t_v4f64 = Type::vector(t_f64, 4); - let t_v8f64 = Type::vector(t_f64, 8); + let t_v2f64 = Type::vector::(t_f64, 2); + let t_v4f64 = Type::vector::(t_f64, 4); + let t_v8f64 = Type::vector::(t_f64, 8); ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 627260604d503..64d224b02853d 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -18,6 +18,7 @@ use declare; use rustc::session::config::DebugInfo; use type_::Type; use value::Value; +use traits::BuilderMethods; use syntax::attr; @@ -55,7 
+56,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(Type::i8(cx), + let llvm_type = Type::array::(Type::i8(cx), section_contents.len() as u64); let section_var = declare::define_global(cx, section_var_name, diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 532ec589811c4..455e8d998623a 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -45,6 +45,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; use rustc::ty::layout::{self, LayoutOf}; +use traits::BuilderMethods; pub mod gdb; mod utils; diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index a9da3acbd9425..f4feabd2a92b2 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -17,6 +17,7 @@ use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; +use traits::BuilderMethods; use libc::c_uint; use syntax_pos::{Span, Pos}; @@ -79,7 +80,10 @@ impl InternalDebugLocation<'ll> { } } -pub fn set_debug_location(bx: &Builder<'_, 'll, '_, &'ll Value>, debug_location: InternalDebugLocation<'ll>) { +pub fn set_debug_location( + bx: &Builder<'_, 'll, '_, &'ll Value>, + debug_location: InternalDebugLocation<'ll> +) { let metadata_node = match debug_location { KnownLocation { scope, line, col } => { // For MSVC, set the column number to zero. 
diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 08c16caec21e4..d197ccfa8ec5d 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -32,7 +32,7 @@ use attributes; use context::CodegenCx; use common; use type_::Type; -use value::Value; +use value::{Value, ValueTrait}; /// Declare a global value. @@ -55,12 +55,12 @@ pub fn declare_global( /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. -fn declare_raw_fn( +fn declare_raw_fn( cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, callconv: llvm::CallConv, ty: &'ll Type, -) -> &'ll Value { +) -> &'ll Value where Value : ValueTrait { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); let namebuf = SmallCStr::new(name); let llfn = unsafe { @@ -109,7 +109,7 @@ fn declare_raw_fn( attributes::non_lazy_bind(cx.sess(), llfn); - llfn + Value::of_llvm(llfn) } @@ -120,11 +120,11 @@ fn declare_raw_fn( /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. -pub fn declare_cfn( +pub fn declare_cfn( cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, fn_type: &'ll Type -) -> &'ll Value { +) -> &'ll Value where Value : ValueTrait { declare_raw_fn(cx, name, llvm::CCallConv, fn_type) } @@ -181,7 +181,7 @@ pub fn define_global( /// Declare a private global /// /// Use this function when you intend to define a global without a name. 
-pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value { +pub fn define_private_global(cx: &CodegenCx<'ll, '_, &'ll Value>, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) } diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index b06c220c2e1c2..46f4c6595df24 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -21,9 +21,13 @@ use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; +use traits::BuilderMethods; -pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx, &'ll Value>, t: Ty<'tcx>, info: Option<&'ll Value>) - -> (&'ll Value, &'ll Value) { +pub fn size_and_align_of_dst( + bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + t: Ty<'tcx>, + info: Option<&'ll Value> +) -> (&'ll Value, &'ll Value) { debug!("calculate size of DST: {}; with lost info: {:?}", t, info); if bx.cx.type_is_sized(t) { diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index b6735e54c9944..4929af44cf22c 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -30,6 +30,8 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; +use traits::BuilderMethods; + use rustc::session::Session; use syntax_pos::Span; @@ -571,7 +573,7 @@ pub fn codegen_intrinsic_call( Vector(ref t, ref llvm_elem, length) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![Type::vector(elem, length as u64)] + vec![Type::vector::(elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() @@ -620,7 +622,10 @@ pub fn codegen_intrinsic_call( } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))] + vec![ + bx.bitcast(arg.immediate(), + Type::vector::(llvm_elem, 
length as u64)) + ] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer @@ -648,7 +653,7 @@ pub fn codegen_intrinsic_call( intrinsics::IntrinsicDef::Named(name) => { let f = declare::declare_cfn(cx, name, - Type::func(&inputs, outputs)); + Type::func::(&inputs, outputs)); bx.call(f, &llargs, None) } }; @@ -1149,7 +1154,7 @@ fn generic_simd_intrinsic( } // truncate the mask to a vector of i1s let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, m_len as u64); + let i1xn = Type::vector::(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1290,7 +1295,7 @@ fn generic_simd_intrinsic( elem_ty = elem_ty.ptr_to(); no_pointers -= 1; } - Type::vector(elem_ty, vec_len as u64) + Type::vector::(elem_ty, vec_len as u64) } @@ -1373,7 +1378,7 @@ fn generic_simd_intrinsic( // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1xn = Type::vector::(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ -1388,8 +1393,11 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty, - llvm_elem_vec_ty], llvm_elem_vec_ty)); + Type::func::(&[ + llvm_pointer_vec_ty, + alignment_ty, + mask_ty, + llvm_elem_vec_ty], llvm_elem_vec_ty)); llvm::SetUnnamedAddr(f, false); let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); @@ -1470,7 +1478,7 @@ fn generic_simd_intrinsic( // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1xn = Type::vector::(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ 
-1487,7 +1495,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_elem_vec_ty, + Type::func::(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1622,7 +1630,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // boolean reductions operate on vectors of i1s: let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1xn = Type::vector::(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 63a8ab077e5ae..185973c090555 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -106,6 +106,8 @@ mod back { pub mod wasm; } +mod traits; + mod abi; mod allocator; mod asm; diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 9c964ef30718f..96285d3c28648 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -17,6 +17,8 @@ use monomorphize; use type_::Type; use value::Value; +use traits::BuilderMethods; + use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; use debuginfo; @@ -48,7 +50,11 @@ impl<'a, 'tcx> VirtualIndex { ptr } - pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llvtable: &'ll Value) -> &'ll Value { + pub fn get_usize( + self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + llvtable: &'ll Value + ) -> &'ll Value { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 708ca8ffba267..4d7c7f86347cd 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -26,6 +26,8 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; +use traits::BuilderMethods; + use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -98,16 +100,17 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; - let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_, &'ll Value>, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // micro-optimization: generate a `ret` rather than a jump - // to a trampoline. - bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); - } else { - bx.br(lltarget); - } - }; + let funclet_br = + |this: &mut Self, bx: Builder<'_, 'll, '_, &'ll Value>, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. 
+ bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + }; let do_call = | this: &mut Self, @@ -803,7 +806,10 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> PlaceRef<'tcx, &'ll Value> { + fn get_personality_slot( + &mut self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value> + ) -> PlaceRef<'tcx, &'ll Value> { let cx = bx.cx; if let Some(slot) = self.personality_slot { slot diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index df7b4ec1a9861..d0a5b320ecc36 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -27,6 +27,7 @@ use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; +use traits::BuilderMethods; use super::super::callee; use super::FunctionCx; diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index fa1380854d932..5d318d32f7965 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -25,6 +25,7 @@ use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; use type_::Type; use value::Value; +use traits::BuilderMethods; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -118,7 +119,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ) } - pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_, &'ll Value>, source_info: mir::SourceInfo) { + pub fn set_debug_loc( + &mut self, + bx: &Builder<'_, 'll, '_, &'ll Value>, + source_info: mir::SourceInfo + ) { let (scope, span) = self.debug_loc(source_info); debuginfo::set_source_location(&self.debug_context, bx, scope, span); } diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index ab67a35895cef..cd5cb339cc97e 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ 
b/src/librustc_codegen_llvm/mir/operand.rs @@ -22,6 +22,8 @@ use type_of::LayoutLlvmExt; use type_::Type; use glue; +use traits::BuilderMethods; + use std::fmt; use super::{FunctionCx, LocalRef}; @@ -260,7 +262,11 @@ impl OperandValue<&'ll Value> { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { + pub fn volatile_store( + self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + dest: PlaceRef<'tcx, &'ll Value> + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } @@ -271,14 +277,23 @@ impl OperandValue<&'ll Value> { ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } +} - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { +impl<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized> OperandValue<&'ll Value> where + Value : ValueTrait, + Builder<'a, 'll, 'tcx, &'ll Value>: BuilderMethods<'a, 'll, 'tcx, Value> +{ + pub fn nontemporal_store( + self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + dest: PlaceRef<'tcx, &'ll Value> + ) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags( + fn store_with_flags>( self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Builder, dest: PlaceRef<'tcx, &'ll Value>, flags: MemFlags, ) { @@ -309,8 +324,14 @@ impl OperandValue<&'ll Value> { } } } +} - pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<'ll, 'tcx>) { +impl OperandValue<&'ll Value> { + pub fn store_unsized( + self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + indirect_dest: PlaceRef<'tcx, &'ll Value> + ) { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); let flags = MemFlags::empty(); @@ -330,13 +351,13 @@ impl OperandValue<&'ll Value> { let min_align = Align::from_bits(8, 8).unwrap(); // Allocate an appropriate region on the stack, and copy the value into it - let (llsize, _) = 
glue::size_and_align_of_dst(&bx, unsized_ty, Some(llextra)); + let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); - base::call_memcpy(&bx, lldst, llptr, llsize, min_align, flags); + base::call_memcpy(bx, lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. let indirect_operand = OperandValue::Pair(lldst, llextra); - indirect_operand.store(&bx, indirect_dest); + indirect_operand.store(bx, indirect_dest); } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 701e10ab79824..70457015ce5a5 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -23,6 +23,8 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; +use traits::BuilderMethods; + use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -279,7 +281,11 @@ impl PlaceRef<'tcx, &'ll Value> { } /// Obtain the actual discriminant of a value. 
- pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, cast_to: Ty<'tcx>) -> &'ll Value { + pub fn codegen_get_discr( + self, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + cast_to: Ty<'tcx> + ) -> &'ll Value { let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); if self.layout.abi.is_uninhabited() { return C_undef(cast_to); diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index d1f4e5d4f6f55..eeca176a6bbbc 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -28,6 +28,8 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; +use traits::BuilderMethods; + use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; @@ -179,10 +181,10 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } pub fn codegen_rvalue_unsized(&mut self, - bx: Builder<'a, 'll, 'tcx>, - indirect_dest: PlaceRef<'ll, 'tcx>, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, + indirect_dest: PlaceRef<'tcx, &'ll Value>, rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> + -> Builder<'a, 'll, 'tcx, &'ll Value> { debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue); @@ -198,11 +200,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn codegen_rvalue_operand(&mut self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, - rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'll, 'tcx, &'ll Value>, OperandRef<'tcx, &'ll Value>) - { + pub fn codegen_rvalue_operand( + &mut self, + bx: Builder<'a, 'll, 'tcx, &'ll Value>, + rvalue: &mir::Rvalue<'tcx> + ) -> (Builder<'a, 'll, 'tcx, &'ll Value>, OperandRef<'tcx, &'ll Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { @@ -750,7 +752,11 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_, &'ll Value>, ty: Ty) -> &'ll Value { +fn 
get_overflow_intrinsic( + oop: OverflowOp, + bx: &Builder<'_, 'll, '_, &'ll Value>, + ty: Ty +) -> &'ll Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs new file mode 100644 index 0000000000000..bf05c83d0240a --- /dev/null +++ b/src/librustc_codegen_llvm/traits.rs @@ -0,0 +1,275 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{IntPredicate, RealPredicate, OperandBundleDef}; +use llvm::{self, BasicBlock}; +use common::*; +use type_::Type; +use libc::c_char; +use rustc::ty::TyCtxt; +use rustc::ty::layout::{Align, Size}; +use rustc::session::Session; +use builder::MemFlags; + +use std::borrow::Cow; +use std::ops::Range; + + +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized> { + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + llfn: &'ll Value, + name: &'b str + ) -> Self; + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self; + fn build_sibling_block<'b>(&self, name: &'b str) -> Self; + fn sess(&self) -> &Session; + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value>; + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; + fn llfn(&self) -> &'ll Value; + fn llbb(&self) -> &'ll BasicBlock; + fn count_insn(&self, category: &str); + + fn set_value_name(&self, value: &'ll Value, name: &str); + fn position_at_end(&self, llbb: &'ll BasicBlock); + fn position_at_start(&self, llbb: &'ll BasicBlock); + fn ret_void(&self); + fn ret(&self, v: &'ll Value); + fn br(&self, dest: &'ll BasicBlock); + fn cond_br( + &self, + cond: 
&'ll Value, + then_llbb: &'ll BasicBlock, + else_llbb: &'ll BasicBlock, + ); + fn switch( + &self, + v: &'ll Value, + else_llbb: &'ll BasicBlock, + num_cases: usize, + ) -> &'ll Value; + fn invoke( + &self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + bundle: Option<&OperandBundleDef<'ll>> + ) -> &'ll Value; + fn unreachable(&self); + fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + 
fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn neg(&self, v: &'ll Value) -> &'ll Value; + fn fneg(&self, v: &'ll Value) -> &'ll Value; + fn not(&self, v: &'ll Value) -> &'ll Value; + + fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value; + fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value; + fn array_alloca( + &self, + ty: &'ll Type, + len: &'ll Value, + name: &str, + align: Align + ) -> &'ll Value; + + fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value; + fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value; + fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value; + + fn range_metadata(&self, load: &'ll Value, range: Range); + fn nonnull_metadata(&self, load: &'ll Value); + + fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value; + fn atomic_store( + &self, + val: &'ll Value, + ptr: &'ll Value, + order: AtomicOrdering, + align: Align + ); + fn store_with_flags( + &self, + val: &'ll Value, + ptr: &'ll Value, + align: Align, + flags: MemFlags, + ) -> &'ll Value; + + fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value; + fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value; + fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value; + + fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn inttoptr(&self, 
val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value; + fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + + fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + + fn empty_phi(&self, ty: &'ll Type) -> &'ll Value; + fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value; + fn inline_asm_call( + &self, + asm: *const c_char, + cons: *const c_char, + inputs: &[&'ll Value], + output: &'ll Type, + volatile: bool, + alignstack: bool, + dia: AsmDialect + ) -> &'ll Value; + + fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn select( + &self, cond: &'ll Value, + then_val: &'ll Value, + else_val: &'ll Value, + ) -> &'ll Value; + + fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value; + fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value; + fn insert_element( + &self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, + ) -> &'ll Value; + fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value; + fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value; + fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value; + fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value; + fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll 
Value; + fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value; + fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value; + fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value; + fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value; + fn insert_value( + &self, + agg_val: &'ll Value, + elt: &'ll Value, + idx: u64 + ) -> &'ll Value; + + fn landing_pad( + &self, + ty: &'ll Type, + pers_fn: &'ll Value, + num_clauses: usize + ) -> &'ll Value; + fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value); + fn set_cleanup(&self, landing_pad: &'ll Value); + fn resume(&self, exn: &'ll Value) -> &'ll Value; + fn cleanup_pad( + &self, + parent: Option<&'ll Value>, + args: &[&'ll Value] + ) -> &'ll Value; + fn cleanup_ret( + &self, cleanup: &'ll Value, + unwind: Option<&'ll BasicBlock>, + ) -> &'ll Value; + fn catch_pad( + &self, + parent: &'ll Value, + args: &[&'ll Value] + ) -> &'ll Value; + fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value; + fn catch_switch( + &self, + parent: Option<&'ll Value>, + unwind: Option<&'ll BasicBlock>, + num_handlers: usize, + ) -> &'ll Value; + fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock); + fn set_personality_fn(&self, personality: &'ll Value); + + fn atomic_cmpxchg( + &self, + dst: &'ll Value, + cmp: &'ll Value, + src: &'ll Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: llvm::Bool, + ) -> &'ll Value; + fn atomic_rmw( + &self, + op: AtomicRmwBinOp, + dst: &'ll Value, + src: &'ll Value, + order: AtomicOrdering, + ) -> &'ll Value; + fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); + fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock); + fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll 
BasicBlock); + fn set_invariant_load(&self, load: &'ll Value); + + fn check_store( + &self, + val: &'ll Value, + ptr: &'ll Value + ) -> &'ll Value; + fn check_call<'b>( + &self, + typ: &str, + llfn: &'ll Value, + args: &'b [&'ll Value] + ) -> Cow<'b, [&'ll Value]>; + fn lifetime_start(&self, ptr: &'ll Value, size: Size); + fn lifetime_end(&self, ptr: &'ll Value, size: Size); + + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size); + + fn call(&self, llfn: &'ll Value, args: &[&'ll Value], + bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value; + fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; +} diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 10e0361ee1bcb..e4e4eca3fbbdd 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,7 +16,7 @@ use llvm; use llvm::{Bool, False, True, TypeKind}; use context::CodegenCx; -use value::Value; +use value::{Value, ValueTrait}; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; @@ -41,107 +41,143 @@ impl fmt::Debug for Type { } impl Type { - pub fn void(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn void( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMVoidTypeInContext(cx.llcx) } } - pub fn metadata(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn metadata( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMRustMetadataTypeInContext(cx.llcx) } } - pub fn i1(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i1( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMInt1TypeInContext(cx.llcx) } } - pub fn i8(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i8( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMInt8TypeInContext(cx.llcx) } } - pub fn 
i8_llcx(llcx: &llvm::Context) -> &Type { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type where Value : ValueTrait { unsafe { llvm::LLVMInt8TypeInContext(llcx) } } - pub fn i16(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i16( + cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type where Value : ValueTrait { unsafe { + llvm::LLVMInt16TypeInContext(cx.llcx) } } - pub fn i32(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i32( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMInt32TypeInContext(cx.llcx) } } - pub fn i64(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i64( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMInt64TypeInContext(cx.llcx) } } - pub fn i128(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i128( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, 128) } } // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix(cx: &CodegenCx<'ll, '_, &'ll Value>, num_bits: u64) -> &'ll Type { + pub fn ix( + cx: &CodegenCx<'ll, '_, &'ll Value>, + num_bits: u64 + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) } } // Creates an integer type with the given number of bits, e.g. 
i24 - pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + pub fn ix_llcx( + llcx: &llvm::Context, + num_bits: u64 + ) -> &Type where Value : ValueTrait { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn f32(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn f32( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMFloatTypeInContext(cx.llcx) } } - pub fn f64(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn f64( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMDoubleTypeInContext(cx.llcx) } } - pub fn bool(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn bool( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { Type::i8(cx) } - pub fn char(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn char( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { Type::i32(cx) } - pub fn i8p(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn i8p( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { Type::i8(cx).ptr_to() } - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to() + pub fn i8p_llcx(llcx: &llvm::Context) -> &Type where Value : ValueTrait { + Type::i8_llcx::(llcx).ptr_to() } - pub fn isize(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn isize( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { cx.isize_ty } - pub fn c_int(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + pub fn c_int( + cx: &CodegenCx<'ll, '_, &'ll Value> + ) -> &'ll Type where Value : ValueTrait { match &cx.tcx.sess.target.target.target_c_int_width[..] 
{ "16" => Type::i16(cx), "32" => Type::i32(cx), @@ -150,7 +186,10 @@ impl Type { } } - pub fn int_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::IntTy) -> &'ll Type { + pub fn int_from_ty( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: ast::IntTy + ) -> &'ll Type where Value : ValueTrait { match t { ast::IntTy::Isize => cx.isize_ty, ast::IntTy::I8 => Type::i8(cx), @@ -161,7 +200,10 @@ impl Type { } } - pub fn uint_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::UintTy) -> &'ll Type { + pub fn uint_from_ty( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: ast::UintTy + ) -> &'ll Type where Value : ValueTrait { match t { ast::UintTy::Usize => cx.isize_ty, ast::UintTy::U8 => Type::i8(cx), @@ -172,32 +214,41 @@ impl Type { } } - pub fn float_from_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::FloatTy) -> &'ll Type { + pub fn float_from_ty( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: ast::FloatTy + ) -> &'ll Type where Value : ValueTrait { match t { ast::FloatTy::F32 => Type::f32(cx), ast::FloatTy::F64 => Type::f64(cx), } } - pub fn func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + pub fn func( + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } } - pub fn variadic_func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + pub fn variadic_func( + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - pub fn struct_( + pub fn struct_( cx: &CodegenCx<'ll, '_, &'ll Value>, els: &[&'ll Type], packed: bool - ) -> &'ll Type { + ) -> &'ll Type where Value : ValueTrait { unsafe { llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), els.len() as c_uint, @@ -213,13 +264,13 @@ impl Type { } - pub fn array(ty: &Type, len: u64) -> &Type { + pub fn array(ty: &Type, len: u64) -> &Type where Value : ValueTrait { unsafe { llvm::LLVMRustArrayType(ty, len) 
} } - pub fn vector(ty: &Type, len: u64) -> &Type { + pub fn vector(ty: &Type, len: u64) -> &Type where Value : ValueTrait { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } @@ -314,7 +365,7 @@ impl Type { let size = size.bytes(); let unit_size = unit.size().bytes(); assert_eq!(size % unit_size, 0); - Type::array(Type::from_integer(cx, unit), size / unit_size) + Type::array::(Type::from_integer(cx, unit), size / unit_size) } pub fn x86_mmx(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index e798f4e73f7f7..ebd4aec1a3542 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -41,7 +41,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, return Type::x86_mmx(cx) } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector(element, count); + return Type::vector::(element, count); } } layout::Abi::ScalarPair(..) => { @@ -94,7 +94,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, } } layout::FieldPlacement::Array { count, .. } => { - Type::array(layout.field(cx, 0).llvm_type(cx), count) + Type::array::(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. 
} => { match name { diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index a17500cc87d68..b405a72724ef1 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -15,7 +15,10 @@ use llvm; use std::fmt; use std::hash::{Hash, Hasher}; -pub trait ValueTrait : fmt::Debug {} +pub trait ValueTrait : fmt::Debug { + fn to_llvm(&self) -> &llvm::Value; + fn of_llvm(&llvm::Value) -> &Self; +} impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { @@ -23,7 +26,14 @@ impl PartialEq for Value { } } -impl ValueTrait for Value {} +impl ValueTrait for Value { + fn to_llvm(&self) -> &llvm::Value { + &self + } + fn of_llvm(v: &llvm::Value) -> &Self { + &v + } +} impl Eq for Value {} From ba2ed26f3f9d24eef082dce1f1de9f32dbd47e5c Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 20 Aug 2018 17:44:18 +0200 Subject: [PATCH 04/76] Generalized BasicBlocks in BuilderMethods trait --- src/librustc_codegen_llvm/base.rs | 7 ++++--- src/librustc_codegen_llvm/builder.rs | 3 ++- src/librustc_codegen_llvm/mir/operand.rs | 6 ++++-- src/librustc_codegen_llvm/traits.rs | 8 ++++++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 7d4144eb543a7..bdf9dc49250b0 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -76,6 +76,7 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; use traits::BuilderMethods; +use llvm::BasicBlock; use std::any::Any; use std::ffi::CString; @@ -392,7 +393,7 @@ pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized, - Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, val: &'ll Value ) -> &'ll Value where Value : ValueTrait { @@ -427,7 +428,7 @@ pub fn to_immediate_scalar( pub 
fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized, - Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, dst: &'ll Value, src: &'ll Value, @@ -456,7 +457,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized, - Builder: BuilderMethods<'a, 'll, 'tcx, Value>>( + Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, dst: &'ll Value, src: &'ll Value, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index a1dcd049bc145..cd6149766b1d9 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -55,7 +55,8 @@ bitflags! { } } -impl BuilderMethods<'a, 'll, 'tcx, Value> for Builder<'a, 'll, 'tcx, &'ll Value> { +impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> + for Builder<'a, 'll, 'tcx, &'ll Value> { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index cd5cb339cc97e..80f7ab9f0b83f 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -23,6 +23,7 @@ use type_::Type; use glue; use traits::BuilderMethods; +use llvm::BasicBlock; use std::fmt; @@ -281,7 +282,8 @@ impl OperandValue<&'ll Value> { impl<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized> OperandValue<&'ll Value> where Value : ValueTrait, - Builder<'a, 'll, 'tcx, &'ll Value>: BuilderMethods<'a, 'll, 'tcx, Value> + Builder<'a, 'll, 'tcx, &'ll Value>: + BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> { pub fn nontemporal_store( self, @@ -291,7 +293,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized> OperandValue<&'ll Value> where self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags>( + fn store_with_flags>( self, bx: &Builder, dest: PlaceRef<'tcx, &'ll Value>, diff --git a/src/librustc_codegen_llvm/traits.rs 
b/src/librustc_codegen_llvm/traits.rs index bf05c83d0240a..a422f348063c1 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -10,7 +10,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{IntPredicate, RealPredicate, OperandBundleDef}; -use llvm::{self, BasicBlock}; +use llvm; use common::*; use type_::Type; use libc::c_char; @@ -23,7 +23,11 @@ use std::borrow::Cow; use std::ops::Range; -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized> { +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, + Value : ?Sized, + BasicBlock: ?Sized + > { + fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, From a4f6f8e9fc3fb9106358148173848d264e8d333d Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 20 Aug 2018 18:16:51 +0200 Subject: [PATCH 05/76] Generalized IntPredicate in the BuilderMethods trait --- src/librustc_codegen_llvm/base.rs | 16 +++++++------- src/librustc_codegen_llvm/builder.rs | 10 +++++---- src/librustc_codegen_llvm/glue.rs | 5 ++--- src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 18 ++++++++++++++++ src/librustc_codegen_llvm/mir/block.rs | 4 ++-- src/librustc_codegen_llvm/mir/place.rs | 6 +++--- src/librustc_codegen_llvm/mir/rvalue.rs | 28 ++++++++++++------------- src/librustc_codegen_llvm/traits.rs | 22 ++++++++++++++++--- 9 files changed, 73 insertions(+), 38 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index bdf9dc49250b0..20abc324b650a 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -75,7 +75,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use traits::BuilderMethods; +use traits::{IntPredicate, BuilderMethods}; use llvm::BasicBlock; use std::any::Any; @@ -128,14 +128,14 @@ impl Drop for StatRecorder<'a, 'll, 'tcx> { pub fn 
bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) - -> llvm::IntPredicate { + -> IntPredicate { match op { - hir::BinOpKind::Eq => llvm::IntEQ, - hir::BinOpKind::Ne => llvm::IntNE, - hir::BinOpKind::Lt => if signed { llvm::IntSLT } else { llvm::IntULT }, - hir::BinOpKind::Le => if signed { llvm::IntSLE } else { llvm::IntULE }, - hir::BinOpKind::Gt => if signed { llvm::IntSGT } else { llvm::IntUGT }, - hir::BinOpKind::Ge => if signed { llvm::IntSGE } else { llvm::IntUGE }, + hir::BinOpKind::Eq => IntPredicate::IntEQ, + hir::BinOpKind::Ne => IntPredicate::IntNE, + hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, + hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, + hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, + hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, op => { bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ found {:?}", diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index cd6149766b1d9..e183bc29a1d7d 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef}; +use llvm::{RealPredicate, False, OperandBundleDef}; use llvm::{self, BasicBlock}; use common::*; use type_::Type; @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use traits::BuilderMethods; +use traits::{self, BuilderMethods}; use std::borrow::Cow; use std::ops::Range; @@ -692,8 +692,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } /* Comparisons */ - fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); + let op : llvm::IntPredicate = traits::IntPredicateMethods::convert_to_backend_specific(op); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } @@ -1064,8 +1065,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> src: &'ll Value, order: AtomicOrdering, failure_order: AtomicOrdering, - weak: llvm::Bool, + weak: bool, ) -> &'ll Value { + let weak = if weak { llvm::True } else { llvm::False }; unsafe { llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, order, failure_order, weak) diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 46f4c6595df24..a5af841d9bdba 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -16,12 +16,11 @@ use std; use builder::Builder; use common::*; -use llvm; use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -use traits::BuilderMethods; +use traits::{IntPredicate,BuilderMethods}; pub fn size_and_align_of_dst( bx: &Builder<'_, 'll, 'tcx, &'ll Value>, @@ -100,7 +99,7 @@ pub fn size_and_align_of_dst( // pick the correct alignment statically. 
C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) } - _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align), + _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align), sized_align, unsized_align) }; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 4929af44cf22c..ae540f43f4155 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -455,7 +455,7 @@ pub fn codegen_intrinsic_call( "cxchg" | "cxchgweak" => { let ty = substs.type_at(0); if int_type_width_signed(ty, cx).is_some() { - let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; + let weak = split[1] == "cxchgweak"; let pair = bx.atomic_cmpxchg( args[0].immediate(), args[1].immediate(), diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 0b98fa4eaf551..8c47be62fdd44 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,6 +19,7 @@ use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; +use traits; use super::RustString; @@ -141,6 +142,23 @@ pub enum IntPredicate { IntSLE = 41, } +impl traits::IntPredicateMethods for IntPredicate { + fn convert_to_backend_specific(intpre: traits::IntPredicate) -> Self { + match intpre { + traits::IntPredicate::IntEQ => IntPredicate::IntEQ, + traits::IntPredicate::IntNE => IntPredicate::IntNE, + traits::IntPredicate::IntUGT => IntPredicate::IntUGT, + traits::IntPredicate::IntUGE => IntPredicate::IntUGE, + traits::IntPredicate::IntULT => IntPredicate::IntULT, + traits::IntPredicate::IntULE => IntPredicate::IntULE, + traits::IntPredicate::IntSGT => IntPredicate::IntSGT, + traits::IntPredicate::IntSGE => IntPredicate::IntSGE, + traits::IntPredicate::IntSLT => IntPredicate::IntSLT, + traits::IntPredicate::IntSLE => IntPredicate::IntSLE, + } + } +} + /// LLVMRealPredicate #[derive(Copy, Clone)] 
#[repr(C)] diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 4d7c7f86347cd..53e5841df29ae 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -26,7 +26,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use traits::BuilderMethods; +use traits::{IntPredicate,BuilderMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -210,7 +210,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } else { let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); let llval = C_uint_big(switch_llty, values[0]); - let cmp = bx.icmp(llvm::IntEQ, discr.immediate(), llval); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } } else { diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 70457015ce5a5..b414be19b2c78 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -23,7 +23,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use traits::BuilderMethods; +use traits::{IntPredicate,BuilderMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -331,7 +331,7 @@ impl PlaceRef<'tcx, &'ll Value> { } else { C_uint_big(niche_llty, niche_start) }; - bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval), + bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), C_uint(cast_to, *niche_variants.start() as u64), C_uint(cast_to, dataful_variant as u64)) } else { @@ -339,7 +339,7 @@ impl PlaceRef<'tcx, &'ll Value> { let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64); - bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max), + bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), bx.intcast(lldiscr, 
cast_to, false), C_uint(cast_to, dataful_variant as u64)) } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index eeca176a6bbbc..12761e9b3694e 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -28,7 +28,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use traits::BuilderMethods; +use traits::{IntPredicate,BuilderMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -135,7 +135,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.br(header_bx.llbb()); let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]); - let keep_going = header_bx.icmp(llvm::IntNE, current, end); + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); cg_elem.val.store(&body_bx, @@ -337,7 +337,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // convenient place to put the `assume`. 
base::call_assume(&bx, bx.icmp( - llvm::IntULE, + IntPredicate::IntULE, llval, C_uint_big(ll_t_in, *scalar.valid_range.end()) )); @@ -639,31 +639,31 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { match op { mir::BinOp::Eq => { bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) + bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr), + bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra) ) } mir::BinOp::Ne => { bx.or( - bx.icmp(llvm::IntNE, lhs_addr, rhs_addr), - bx.icmp(llvm::IntNE, lhs_extra, rhs_extra) + bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr), + bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra) ) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) let (op, strict_op) = match op { - mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), - mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), - mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), - mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), + mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT), + mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT), + mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT), + mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT), _ => bug!(), }; bx.or( bx.icmp(strict_op, lhs_addr, rhs_addr), bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), + bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr), bx.icmp(op, lhs_extra, rhs_extra) ) ) @@ -710,7 +710,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, C_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -838,7 +838,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, const MAX_F32_PLUS_HALF_ULP: u128 = 
((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); - let overflow = bx.icmp(llvm::IntUGE, x, max); + let overflow = bx.icmp(IntPredicate::IntUGE, x, max); let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index a422f348063c1..3d88394ef1205 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -9,8 +9,7 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{IntPredicate, RealPredicate, OperandBundleDef}; -use llvm; +use llvm::{RealPredicate, OperandBundleDef}; use common::*; use type_::Type; use libc::c_char; @@ -22,6 +21,23 @@ use builder::MemFlags; use std::borrow::Cow; use std::ops::Range; +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE, +} + +pub trait IntPredicateMethods { + fn convert_to_backend_specific(intpre : IntPredicate) -> Self; +} + pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized, @@ -243,7 +259,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, src: &'ll Value, order: AtomicOrdering, failure_order: AtomicOrdering, - weak: llvm::Bool, + weak: bool, ) -> &'ll Value; fn atomic_rmw( &self, From c42cf099b22991321f735e8787453073ee89e406 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 13:54:41 +0200 Subject: [PATCH 06/76] Generalized RealPredicate --- src/librustc_codegen_llvm/base.rs | 16 +++++++-------- src/librustc_codegen_llvm/builder.rs | 4 ++-- src/librustc_codegen_llvm/llvm/ffi.rs | 23 ++++++++++++++++++++++ src/librustc_codegen_llvm/mir/rvalue.rs | 9 ++++----- src/librustc_codegen_llvm/traits.rs | 26 
++++++++++++++++++++++++- 5 files changed, 62 insertions(+), 16 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 20abc324b650a..bc07550c34760 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -75,7 +75,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use traits::{IntPredicate, BuilderMethods}; +use traits::{IntPredicate, RealPredicate, BuilderMethods}; use llvm::BasicBlock; use std::any::Any; @@ -144,14 +144,14 @@ pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, } } -pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { match op { - hir::BinOpKind::Eq => llvm::RealOEQ, - hir::BinOpKind::Ne => llvm::RealUNE, - hir::BinOpKind::Lt => llvm::RealOLT, - hir::BinOpKind::Le => llvm::RealOLE, - hir::BinOpKind::Gt => llvm::RealOGT, - hir::BinOpKind::Ge => llvm::RealOGE, + hir::BinOpKind::Eq => RealPredicate::RealOEQ, + hir::BinOpKind::Ne => RealPredicate::RealUNE, + hir::BinOpKind::Lt => RealPredicate::RealOLT, + hir::BinOpKind::Le => RealPredicate::RealOLE, + hir::BinOpKind::Gt => RealPredicate::RealOGT, + hir::BinOpKind::Ge => RealPredicate::RealOGE, op => { bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ found {:?}", diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index e183bc29a1d7d..df5cbca59c3e1 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{RealPredicate, False, OperandBundleDef}; +use llvm::{False, OperandBundleDef}; use llvm::{self, BasicBlock}; use common::*; use type_::Type; @@ -700,7 +700,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 8c47be62fdd44..dedf67e212ef3 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -181,6 +181,29 @@ pub enum RealPredicate { RealPredicateTrue = 15, } +impl traits::RealPredicateMethods for RealPredicate { + fn convert_to_backend_specific(realpred: traits::RealPredicate) -> Self { + match realpred { + traits::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, + traits::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + traits::RealPredicate::RealOGT => RealPredicate::RealOGT, + traits::RealPredicate::RealOGE => RealPredicate::RealOGE, + traits::RealPredicate::RealOLT => RealPredicate::RealOLT, + traits::RealPredicate::RealOLE => RealPredicate::RealOLE, + traits::RealPredicate::RealONE => RealPredicate::RealONE, + traits::RealPredicate::RealORD => RealPredicate::RealORD, + traits::RealPredicate::RealUNO => RealPredicate::RealUNO, + traits::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + traits::RealPredicate::RealUGT => RealPredicate::RealUGT, + traits::RealPredicate::RealUGE => RealPredicate::RealUGE, + traits::RealPredicate::RealULT => RealPredicate::RealULT, + traits::RealPredicate::RealULE => RealPredicate::RealULE, + traits::RealPredicate::RealUNE => RealPredicate::RealUNE, + 
traits::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue + } + } +} + /// LLVMTypeKind #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 12761e9b3694e..0439b577ba7ae 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::{self, LayoutOf}; @@ -28,7 +27,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use traits::{IntPredicate,BuilderMethods}; +use traits::{IntPredicate, RealPredicate, BuilderMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -962,8 +961,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // negation, and the negation can be merged into the select. Therefore, it not necessarily any // more expensive than a ordered ("normal") comparison. Whether these optimizations will be // performed is ultimately up to the backend, but at least x86 does perform them. - let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min); - let greater = bx.fcmp(llvm::RealOGT, x, f_max); + let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); + let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); @@ -974,7 +973,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // Therefore we only need to execute this step for signed integer types. 
if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0)) + bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, C_uint(int_ty, 0)) } else { s1 } diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index 3d88394ef1205..c329caa60efbb 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -9,7 +9,7 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{RealPredicate, OperandBundleDef}; +use llvm::OperandBundleDef; use common::*; use type_::Type; use libc::c_char; @@ -38,6 +38,30 @@ pub trait IntPredicateMethods { fn convert_to_backend_specific(intpre : IntPredicate) -> Self; } +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue, +} + +pub trait RealPredicateMethods { + fn convert_to_backend_specific(realpred : RealPredicate) -> Self; +} + pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized, From 138b446ea093c13e558dcda2040c66c7850034a8 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 16:31:36 +0200 Subject: [PATCH 07/76] Removed useless traits for IntPredicate and RealPredicate --- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 8 ++++---- src/librustc_codegen_llvm/traits.rs | 9 --------- 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index df5cbca59c3e1..a108d822f3620 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -694,7 +694,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> /* Comparisons */ fn icmp(&self, op: traits::IntPredicate, lhs: &'ll 
Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); - let op : llvm::IntPredicate = traits::IntPredicateMethods::convert_to_backend_specific(op); + let op = llvm::IntPredicate::from_generic(op); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index dedf67e212ef3..b9c573fa23701 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -142,8 +142,8 @@ pub enum IntPredicate { IntSLE = 41, } -impl traits::IntPredicateMethods for IntPredicate { - fn convert_to_backend_specific(intpre: traits::IntPredicate) -> Self { +impl IntPredicate { + pub fn from_generic(intpre: traits::IntPredicate) -> Self { match intpre { traits::IntPredicate::IntEQ => IntPredicate::IntEQ, traits::IntPredicate::IntNE => IntPredicate::IntNE, @@ -181,8 +181,8 @@ pub enum RealPredicate { RealPredicateTrue = 15, } -impl traits::RealPredicateMethods for RealPredicate { - fn convert_to_backend_specific(realpred: traits::RealPredicate) -> Self { +impl RealPredicate { + pub fn from_generic(realpred: traits::RealPredicate) -> Self { match realpred { traits::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, traits::RealPredicate::RealOEQ => RealPredicate::RealOEQ, diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index c329caa60efbb..cdb5a93f0f86d 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -34,10 +34,6 @@ pub enum IntPredicate { IntSLE, } -pub trait IntPredicateMethods { - fn convert_to_backend_specific(intpre : IntPredicate) -> Self; -} - #[allow(dead_code)] pub enum RealPredicate { RealPredicateFalse, @@ -58,11 +54,6 @@ pub enum RealPredicate { RealPredicateTrue, } -pub trait RealPredicateMethods { - fn convert_to_backend_specific(realpred : RealPredicate) -> Self; -} - - pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, 
Value : ?Sized, BasicBlock: ?Sized From c62e2221014c55e85b0e14701ed7eeeefa9b1d98 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 17:39:43 +0200 Subject: [PATCH 08/76] Generalized OperandBundleDef in BuilderMethods --- src/librustc_codegen_llvm/builder.rs | 8 ++++---- src/librustc_codegen_llvm/common.rs | 10 +++++----- src/librustc_codegen_llvm/llvm/mod.rs | 5 +++++ src/librustc_codegen_llvm/traits.rs | 20 +++++++++++++++++--- 4 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index a108d822f3620..63de9fcf37e35 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -191,7 +191,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> args: &[&'ll Value], then: &'ll BasicBlock, catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -199,7 +199,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> args); let args = self.check_call("invoke", llfn, args); - let bundle = bundle.map(|b| &*b.raw); + let bundle = bundle.map(|b| &*(OperandBundleDef::from_generic(b)).raw); unsafe { llvm::LLVMRustBuildInvoke(self.llbuilder, @@ -1207,7 +1207,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("call"); debug!("Call {:?} with args ({:?})", @@ -1215,7 +1215,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> args); let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*b.raw); + let bundle = bundle.map(|b| &*(OperandBundleDef::from_generic(b)).raw); unsafe { llvm::LLVMRustBuildCall( diff --git 
a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 1642f317d89a9..60fd08ce92d23 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -13,7 +13,7 @@ //! Code that is useful in various codegen modules. use llvm::{self, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; +use llvm::{True, False, Bool}; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; use abi; @@ -28,7 +28,7 @@ use value::{Value, ValueTrait}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; -use traits::BuilderMethods; +use traits::{BuilderMethods, OperandBundleDef}; use libc::{c_uint, c_char}; use std::iter; @@ -93,14 +93,14 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo /// the `OperandBundleDef` value created for MSVC landing pads. pub struct Funclet<'ll> { cleanuppad: &'ll Value, - operand: OperandBundleDef<'ll>, + operand: OperandBundleDef<'ll, &'ll Value>, } impl Funclet<'ll> { pub fn new(cleanuppad: &'ll Value) -> Self { Funclet { cleanuppad, - operand: OperandBundleDef::new("funclet", &[cleanuppad]), + operand: OperandBundleDef::new("funclet", cleanuppad), } } @@ -108,7 +108,7 @@ impl Funclet<'ll> { self.cleanuppad } - pub fn bundle(&self) -> &OperandBundleDef<'ll> { + pub fn bundle(&self) -> &OperandBundleDef<'ll, &'ll Value> { &self.operand } } diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index 4343c8c184ecf..c01a625806bd4 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -28,6 +28,7 @@ use std::ffi::CStr; use std::cell::RefCell; use libc::{self, c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; +use traits; pub mod archive_ro; pub mod diagnostic; @@ -271,6 +272,10 @@ impl OperandBundleDef<'a> { }; OperandBundleDef { raw: def } } + + pub fn from_generic(bundle : 
&traits::OperandBundleDef<'a, &'a Value>) -> Self { + Self::new(bundle.name, &[bundle.val]) + } } impl Drop for OperandBundleDef<'a> { diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index cdb5a93f0f86d..daa87426fcc4f 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -9,7 +9,6 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::OperandBundleDef; use common::*; use type_::Type; use libc::c_char; @@ -17,10 +16,25 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::Session; use builder::MemFlags; +use value::Value; use std::borrow::Cow; use std::ops::Range; +pub struct OperandBundleDef<'a, Value : 'a> { + pub name: &'a str, + pub val: Value +} + +impl OperandBundleDef<'ll, &'ll Value> { + pub fn new(name: &'ll str, val: &'ll Value) -> Self { + OperandBundleDef { + name, + val, + } + } +} + pub enum IntPredicate { IntEQ, IntNE, @@ -97,7 +111,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, args: &[&'ll Value], then: &'ll BasicBlock, catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll>> + bundle: Option<&OperandBundleDef<'ll, &'ll Value>> ) -> &'ll Value; fn unreachable(&self); fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; @@ -305,6 +319,6 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size); fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value; + bundle: Option<&OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value; fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; } From 6d78a142817a1ad09f2adb6a7a5d101028ecdd00 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 17:54:12 +0200 Subject: [PATCH 09/76] Generalized AtomicRmwBinOp for BuilderMethods --- src/librustc_codegen_llvm/builder.rs 
| 10 ++++++++-- src/librustc_codegen_llvm/intrinsic.rs | 24 ++++++++++++------------ src/librustc_codegen_llvm/llvm/ffi.rs | 18 ++++++++++++++++++ src/librustc_codegen_llvm/traits.rs | 22 ++++++++++++++++++---- 4 files changed, 56 insertions(+), 18 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 63de9fcf37e35..4b8a760c6d054 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1075,13 +1075,19 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn atomic_rmw( &self, - op: AtomicRmwBinOp, + op: traits::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, order: AtomicOrdering, ) -> &'ll Value { unsafe { - llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + order, + False) } } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index ae540f43f4155..72b9f7b76d167 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -30,7 +30,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use traits::BuilderMethods; +use traits::{BuilderMethods, AtomicRmwBinOp}; use rustc::session::Session; use syntax_pos::Span; @@ -510,17 +510,17 @@ pub fn codegen_intrinsic_call( // These are all AtomicRMW ops op => { let atom_op = match op { - "xchg" => llvm::AtomicXchg, - "xadd" => llvm::AtomicAdd, - "xsub" => llvm::AtomicSub, - "and" => llvm::AtomicAnd, - "nand" => llvm::AtomicNand, - "or" => llvm::AtomicOr, - "xor" => llvm::AtomicXor, - "max" => llvm::AtomicMax, - "min" => llvm::AtomicMin, - "umax" => llvm::AtomicUMax, - "umin" => llvm::AtomicUMin, + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => 
AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, _ => cx.sess().fatal("unknown atomic operation") }; diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index b9c573fa23701..3c63ec71c75e7 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -244,6 +244,24 @@ pub enum AtomicRmwBinOp { AtomicUMin = 10, } +impl AtomicRmwBinOp { + pub fn from_generic(op : traits::AtomicRmwBinOp) -> Self { + match op { + traits::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + traits::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + traits::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + traits::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + traits::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + traits::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + traits::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + traits::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + traits::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + traits::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + traits::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + } + } +} + /// LLVMAtomicOrdering #[derive(Copy, Clone)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index daa87426fcc4f..291d63377b46d 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{AtomicOrdering, SynchronizationScope, AsmDialect}; use common::*; use type_::Type; use libc::c_char; @@ -30,7 +30,7 @@ impl OperandBundleDef<'ll, &'ll Value> { pub fn new(name: &'ll str, val: &'ll Value) -> Self { OperandBundleDef { name, - val, + val } } } @@ -45,7 +45,7 @@ pub enum IntPredicate { IntSGT, IntSGE, IntSLT, - IntSLE, + IntSLE } #[allow(dead_code)] @@ -65,7 +65,21 @@ pub enum RealPredicate { RealULT, RealULE, RealUNE, - RealPredicateTrue, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin } pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, From 733840b6547b0790bb765aa7a594b1fcd337acb5 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 18:08:20 +0200 Subject: [PATCH 10/76] Generalized AtomicOrdering for BuilderMethods --- src/librustc_codegen_llvm/builder.rs | 45 +++++++++++++++++++------- src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 16 +++++++++ src/librustc_codegen_llvm/traits.rs | 15 ++++++++- 4 files changed, 64 insertions(+), 14 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 4b8a760c6d054..5d81d09c39a42 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -488,10 +488,15 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { + fn atomic_load(&self, ptr: &'ll Value, order: traits::AtomicOrdering, align: Align) -> &'ll Value { self.count_insn("load.atomic"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + 
AtomicOrdering::from_generic(order) + ); // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? // However, 64-bit atomic loads on `i686-apple-darwin` appear to // require `___atomic_load` with ABI-alignment, so it's staying. @@ -536,12 +541,17 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: AtomicOrdering, align: Align) { + order: traits::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order) + ); // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? // Also see `atomic_load` for more context. llvm::LLVMSetAlignment(store, align.pref() as c_uint); @@ -1063,14 +1073,21 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: AtomicOrdering, - failure_order: AtomicOrdering, + order: traits::AtomicOrdering, + failure_order: traits::AtomicOrdering, weak: bool, ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; unsafe { - llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, - order, failure_order, weak) + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } fn atomic_rmw( @@ -1078,7 +1095,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> op: traits::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: AtomicOrdering, + order: traits::AtomicOrdering, ) -> &'ll Value { unsafe { llvm::LLVMBuildAtomicRMW( @@ -1086,14 +1103,18 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> AtomicRmwBinOp::from_generic(op), dst, src, - order, + 
AtomicOrdering::from_generic(order), False) } } - fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence(&self, order: traits::AtomicOrdering, scope: SynchronizationScope) { unsafe { - llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + scope + ); } } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 72b9f7b76d167..c4d9fa45e0ca3 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -416,7 +416,7 @@ pub fn codegen_intrinsic_call( // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst name if name.starts_with("atomic_") => { - use llvm::AtomicOrdering::*; + use traits::AtomicOrdering::*; let split: Vec<&str> = name.split('_').collect(); diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 3c63ec71c75e7..d7dafe09c6124 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -277,6 +277,22 @@ pub enum AtomicOrdering { SequentiallyConsistent = 7, } +impl AtomicOrdering { + pub fn from_generic(ao : traits::AtomicOrdering) -> Self { + match ao { + traits::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + traits::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + traits::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + traits::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + traits::AtomicOrdering::Release => AtomicOrdering::Release, + traits::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, + traits::AtomicOrdering::SequentiallyConsistent => + AtomicOrdering::SequentiallyConsistent + } + } +} + + /// LLVMRustSynchronizationScope #[derive(Copy, Clone)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/traits.rs 
b/src/librustc_codegen_llvm/traits.rs index 291d63377b46d..87155f8d15b1c 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{SynchronizationScope, AsmDialect}; use common::*; use type_::Type; use libc::c_char; @@ -82,6 +82,19 @@ pub enum AtomicRmwBinOp { AtomicUMin } +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + + pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized, BasicBlock: ?Sized From a4019740885abf06693c299e686f68d7c90e151c Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 18:15:29 +0200 Subject: [PATCH 11/76] Generalized SynchronisationScope for BuilderMethods --- src/librustc_codegen_llvm/builder.rs | 4 ++-- src/librustc_codegen_llvm/intrinsic.rs | 6 +++--- src/librustc_codegen_llvm/llvm/ffi.rs | 10 ++++++++++ src/librustc_codegen_llvm/traits.rs | 10 +++++++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 5d81d09c39a42..5b23ae59549b9 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1108,12 +1108,12 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn atomic_fence(&self, order: traits::AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence(&self, order: traits::AtomicOrdering, scope: traits::SynchronizationScope) { unsafe { llvm::LLVMRustBuildAtomicFence( self.llbuilder, AtomicOrdering::from_generic(order), - scope + SynchronizationScope::from_generic(scope) ); } } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 
c4d9fa45e0ca3..aeec19d9baa0e 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -30,7 +30,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use traits::{BuilderMethods, AtomicRmwBinOp}; +use traits::{BuilderMethods, AtomicRmwBinOp, SynchronizationScope}; use rustc::session::Session; use syntax_pos::Span; @@ -498,12 +498,12 @@ pub fn codegen_intrinsic_call( } "fence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); + bx.atomic_fence(order, SynchronizationScope::CrossThread); return; } "singlethreadfence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); + bx.atomic_fence(order, SynchronizationScope::SingleThread); return; } diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index d7dafe09c6124..4e8809eb3e7ba 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -304,6 +304,16 @@ pub enum SynchronizationScope { CrossThread, } +impl SynchronizationScope { + pub fn from_generic(sc : traits::SynchronizationScope) -> Self { + match sc { + traits::SynchronizationScope::Other => SynchronizationScope::Other, + traits::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, + traits::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, + } + } +} + /// LLVMRustFileType #[derive(Copy, Clone)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index 87155f8d15b1c..ae03e19b4a75b 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm::{SynchronizationScope, AsmDialect}; +use llvm::AsmDialect; use common::*; use type_::Type; use libc::c_char; @@ -94,6 +94,14 @@ pub enum AtomicOrdering { SequentiallyConsistent, } +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. + #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, Value : ?Sized, From b83a2b76842bd314f4b523bba918b50c5611dee0 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 21 Aug 2018 18:22:29 +0200 Subject: [PATCH 12/76] Generalized AsmDialect for BuilderMethods --- src/librustc_codegen_llvm/asm.rs | 8 +------- src/librustc_codegen_llvm/builder.rs | 5 +++-- src/librustc_codegen_llvm/llvm/ffi.rs | 10 ++++++++++ src/librustc_codegen_llvm/traits.rs | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index e61dd0538ccc0..3e2eb47a2ee83 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -22,7 +22,6 @@ use mir::place::PlaceRef; use mir::operand::OperandValue; use std::ffi::CString; -use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM @@ -82,11 +81,6 @@ pub fn codegen_inline_asm( _ => Type::struct_(bx.cx, &output_types, false) }; - let dialect = match ia.dialect { - AsmDialect::Att => llvm::AsmDialect::Att, - AsmDialect::Intel => llvm::AsmDialect::Intel, - }; - let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); let r = bx.inline_asm_call( @@ -96,7 +90,7 @@ pub fn codegen_inline_asm( output_type, ia.volatile, ia.alignstack, - dialect + ia.dialect ); if r.is_none() { return false; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 5b23ae59549b9..702ffed9c392e 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ 
b/src/librustc_codegen_llvm/builder.rs @@ -20,6 +20,7 @@ use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; use traits::{self, BuilderMethods}; +use syntax; use std::borrow::Cow; use std::ops::Range; @@ -740,7 +741,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, inputs: &[&'ll Value], output: &'ll Type, volatile: bool, alignstack: bool, - dia: AsmDialect) -> Option<&'ll Value> { + dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -761,7 +762,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> debug!("Constraint verification result: {:?}", constraints_ok); if constraints_ok { let v = llvm::LLVMRustInlineAsm( - fty, asm, cons, volatile, alignstack, dia); + fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia)); Some(self.call(v, inputs, None)) } else { // LLVM has detected an issue with our constaints, bail out diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 4e8809eb3e7ba..cf723e184f623 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -20,6 +20,7 @@ use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; use traits; +use syntax; use super::RustString; @@ -354,6 +355,15 @@ pub enum AsmDialect { Intel, } +impl AsmDialect { + pub fn from_generic(asm : syntax::ast::AsmDialect) -> Self { + match asm { + syntax::ast::AsmDialect::Att => AsmDialect::Att, + syntax::ast::AsmDialect::Intel => AsmDialect::Intel + } + } +} + /// LLVMRustCodeGenOptLevel #[derive(Copy, Clone, PartialEq)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index ae03e19b4a75b..ed3352ca4493b 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -8,7 +8,6 @@ // 
option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::AsmDialect; use common::*; use type_::Type; use libc::c_char; @@ -20,6 +19,7 @@ use value::Value; use std::borrow::Cow; use std::ops::Range; +use syntax::ast::AsmDialect; pub struct OperandBundleDef<'a, Value : 'a> { pub name: &'a str, From 958f94b40f4222ca52f5573e5ab86e63a31a1d62 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 22 Aug 2018 17:48:32 +0200 Subject: [PATCH 13/76] Removed genericity over Value in various functions Prelude to using associated types in traits rather than type parameters --- src/librustc_codegen_llvm/abi.rs | 8 +- src/librustc_codegen_llvm/back/write.rs | 3 +- src/librustc_codegen_llvm/base.rs | 13 +-- src/librustc_codegen_llvm/builder.rs | 6 +- src/librustc_codegen_llvm/common.rs | 33 +++---- src/librustc_codegen_llvm/context.rs | 42 ++++---- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- src/librustc_codegen_llvm/declare.rs | 12 +-- src/librustc_codegen_llvm/intrinsic.rs | 20 ++-- src/librustc_codegen_llvm/mir/operand.rs | 10 +- src/librustc_codegen_llvm/type_.rs | 106 ++++++++++----------- src/librustc_codegen_llvm/type_of.rs | 4 +- src/librustc_codegen_llvm/value.rs | 14 --- 13 files changed, 121 insertions(+), 152 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 4f1ac352500fe..20b56be2482c9 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -121,7 +121,7 @@ impl LlvmType for Reg { } } RegKind::Vector => { - Type::vector::(Type::i8(cx), self.size.bytes()) + Type::vector(Type::i8(cx), self.size.bytes()) } } } @@ -145,7 +145,7 @@ impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array::(rest_ll_unit, rest_count); + return Type::array(rest_ll_unit, rest_count); } } @@ -645,9 +645,9 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } if 
self.variadic { - Type::variadic_func::(&llargument_tys, llreturn_ty) + Type::variadic_func(&llargument_tys, llreturn_ty) } else { - Type::func::(&llargument_tys, llreturn_ty) + Type::func(&llargument_tys, llreturn_ty) } } diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index b92ee8c484e74..81619c219757b 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -49,7 +49,6 @@ use context::{is_pie_binary, get_reloc_model}; use common::{C_bytes_in_context, val_ty}; use jobserver::{Client, Acquired}; use rustc_demangle; -use value::Value; use std::any::Any; use std::ffi::{CString, CStr}; @@ -2543,7 +2542,7 @@ fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::M "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx::(llcx); + let i8p_ty = Type::i8p_llcx(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage && diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index bc07550c34760..16de4b6821aa2 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -90,7 +90,7 @@ use syntax_pos::symbol::InternedString; use syntax::attr; use rustc::hir::{self, CodegenFnAttrs}; -use value::{Value, ValueTrait}; +use value::Value; use mir::operand::OperandValue; @@ -392,11 +392,10 @@ pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { } pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, - Value : ?Sized, Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, val: &'ll Value -) -> &'ll Value where Value : ValueTrait { +) -> &'ll Value { if val_ty(val) == Type::i1(bx.cx()) { bx.zext(val, Type::i8(bx.cx())) } else { @@ -427,7 +426,6 @@ pub fn to_immediate_scalar( } pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, - Value : ?Sized, Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, 
dst: &'ll Value, @@ -435,7 +433,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, n_bytes: &'ll Value, align: Align, flags: MemFlags, -) where Value : ValueTrait { +) { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let val = bx.load(src, align); @@ -456,7 +454,6 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, } pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, - Value : ?Sized, Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( bx: &Builder, dst: &'ll Value, @@ -464,7 +461,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, layout: TyLayout<'tcx>, align: Align, flags: MemFlags, -) where Value : ValueTrait { +) { let size = layout.size.bytes(); if size == 0 { return; @@ -562,7 +559,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { use_start_lang_item: bool, ) { let llfty = - Type::func::(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); + Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); // Given that `main()` has no arguments, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 702ffed9c392e..8239e746f4d26 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -755,7 +755,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func::(&argtys[..], output); + let fty = Type::func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. 
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); @@ -833,9 +833,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector::(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector::(Type::i32(self.cx), num_elts as u64); + let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) } } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 60fd08ce92d23..7c24f9fe42191 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -23,7 +23,7 @@ use consts; use declare; use type_::Type; use type_of::LayoutLlvmExt; -use value::{Value, ValueTrait}; +use value::Value; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -113,9 +113,9 @@ impl Funclet<'ll> { } } -pub fn val_ty(v: &'ll Value) -> &'ll Type where Value : ValueTrait { +pub fn val_ty(v: &'ll Value) -> &'ll Type { unsafe { - llvm::LLVMTypeOf(v.to_llvm()) + llvm::LLVMTypeOf(v) } } @@ -126,21 +126,21 @@ pub fn C_null(t: &'ll Type) -> &'ll Value { } } -pub fn C_undef(t: &'ll Type) -> &'ll Value where Value : ValueTrait { +pub fn C_undef(t: &'ll Type) -> &'ll Value { unsafe { - Value::of_llvm(llvm::LLVMGetUndef(t)) + llvm::LLVMGetUndef(t) } } -pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value where Value : ValueTrait { +pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { unsafe { - Value::of_llvm(llvm::LLVMConstInt(t, i as u64, True)) + llvm::LLVMConstInt(t, i as u64, True) } } -pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value where Value : ValueTrait { +pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { unsafe { - Value::of_llvm(llvm::LLVMConstInt(t, 
i, False)) + llvm::LLVMConstInt(t, i, False) } } @@ -151,17 +151,11 @@ pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { } } -pub fn C_bool( - cx: &CodegenCx<'ll, '_, &'ll Value>, - val: bool -) -> &'ll Value where Value : ValueTrait { +pub fn C_bool(cx: &CodegenCx<'ll, '_, &'ll Value>, val: bool) -> &'ll Value { C_uint(Type::i1(cx), val as u64) } -pub fn C_i32( - cx: &CodegenCx<'ll, '_, &'ll Value>, - i: i32 -) -> &'ll Value where Value : ValueTrait { +pub fn C_i32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: i32) -> &'ll Value { C_int(Type::i32(cx), i as i64) } @@ -173,10 +167,7 @@ pub fn C_u64(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { C_uint(Type::i64(cx), i) } -pub fn C_usize( - cx: &CodegenCx<'ll, '_, &'ll Value>, - i: u64 -) -> &'ll Value where Value : ValueTrait { +pub fn C_usize(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { let bit_size = cx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 86ef97528053f..25655ffde67c7 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -18,7 +18,7 @@ use callee; use base; use declare; use monomorphize::Instance; -use value::{Value, ValueTrait}; +use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; @@ -283,7 +283,7 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { None }; - let isize_ty = Type::ix_llcx::(llcx, tcx.data_layout.pointer_size.bits()); + let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); CodegenCx { tcx, @@ -315,7 +315,7 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { } } -impl<'b, 'tcx, Value : ?Sized> CodegenCx<'b, 'tcx, &'b Value> where Value : ValueTrait { +impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { pub fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } @@ -379,7 +379,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } else { 
"rust_eh_personality" }; - let fty = Type::variadic_func::(&[], Type::i32(self)); + let fty = Type::variadic_func(&[], Type::i32(self)); declare::declare_cfn(self, name, fty) } }; @@ -447,9 +447,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } } -impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> - where Value : ValueTrait -{ +impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.tcx.data_layout } @@ -482,31 +480,31 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx, &'ll Value> { } /// Declare any llvm intrinsics that you might need -fn declare_intrinsic( +fn declare_intrinsic( cx: &CodegenCx<'ll, '_, &'ll Value>, key: &str -) -> Option<&'ll Value> where Value : ValueTrait { +) -> Option<&'ll Value> { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func::(&[], $ret)); - llvm::SetUnnamedAddr(f.to_llvm(), false); + let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } ); ($name:expr, fn(...) 
-> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func::(&[], $ret)); - llvm::SetUnnamedAddr(f.to_llvm(), false); + let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func::(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f.to_llvm(), false); + let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); } @@ -527,14 +525,14 @@ fn declare_intrinsic( let t_f32 = Type::f32(cx); let t_f64 = Type::f64(cx); - let t_v2f32 = Type::vector::(t_f32, 2); - let t_v4f32 = Type::vector::(t_f32, 4); - let t_v8f32 = Type::vector::(t_f32, 8); - let t_v16f32 = Type::vector::(t_f32, 16); + let t_v2f32 = Type::vector(t_f32, 2); + let t_v4f32 = Type::vector(t_f32, 4); + let t_v8f32 = Type::vector(t_f32, 8); + let t_v16f32 = Type::vector(t_f32, 16); - let t_v2f64 = Type::vector::(t_f64, 2); - let t_v4f64 = Type::vector::(t_f64, 4); - let t_v8f64 = Type::vector::(t_f64, 8); + let t_v2f64 = Type::vector(t_f64, 2); + let t_v4f64 = Type::vector(t_f64, 4); + let t_v8f64 = Type::vector(t_f64, 8); ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 64d224b02853d..4a546f542977b 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -56,7 +56,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array::(Type::i8(cx), + 
let llvm_type = Type::array(Type::i8(cx), section_contents.len() as u64); let section_var = declare::define_global(cx, section_var_name, diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index d197ccfa8ec5d..dfdbf7290ea7b 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -32,7 +32,7 @@ use attributes; use context::CodegenCx; use common; use type_::Type; -use value::{Value, ValueTrait}; +use value::Value; /// Declare a global value. @@ -55,12 +55,12 @@ pub fn declare_global( /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. -fn declare_raw_fn( +fn declare_raw_fn( cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, callconv: llvm::CallConv, ty: &'ll Type, -) -> &'ll Value where Value : ValueTrait { +) -> &'ll Value { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); let namebuf = SmallCStr::new(name); let llfn = unsafe { @@ -109,7 +109,7 @@ fn declare_raw_fn( attributes::non_lazy_bind(cx.sess(), llfn); - Value::of_llvm(llfn) + llfn } @@ -120,11 +120,11 @@ fn declare_raw_fn( /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. 
-pub fn declare_cfn( +pub fn declare_cfn( cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, fn_type: &'ll Type -) -> &'ll Value where Value : ValueTrait { +) -> &'ll Value { declare_raw_fn(cx, name, llvm::CCallConv, fn_type) } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index aeec19d9baa0e..a1f047f638e5a 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -573,7 +573,7 @@ pub fn codegen_intrinsic_call( Vector(ref t, ref llvm_elem, length) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![Type::vector::(elem, length as u64)] + vec![Type::vector(elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() @@ -624,7 +624,7 @@ pub fn codegen_intrinsic_call( let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); vec![ bx.bitcast(arg.immediate(), - Type::vector::(llvm_elem, length as u64)) + Type::vector(llvm_elem, length as u64)) ] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { @@ -653,7 +653,7 @@ pub fn codegen_intrinsic_call( intrinsics::IntrinsicDef::Named(name) => { let f = declare::declare_cfn(cx, name, - Type::func::(&inputs, outputs)); + Type::func(&inputs, outputs)); bx.call(f, &llargs, None) } }; @@ -1154,7 +1154,7 @@ fn generic_simd_intrinsic( } // truncate the mask to a vector of i1s let i1 = Type::i1(bx.cx); - let i1xn = Type::vector::(i1, m_len as u64); + let i1xn = Type::vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1295,7 +1295,7 @@ fn generic_simd_intrinsic( elem_ty = elem_ty.ptr_to(); no_pointers -= 1; } - Type::vector::(elem_ty, vec_len as u64) + Type::vector(elem_ty, vec_len as u64) } @@ -1378,7 +1378,7 @@ fn generic_simd_intrinsic( // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { let i1 = Type::i1(bx.cx); - let i1xn = Type::vector::(i1, 
in_len as u64); + let i1xn = Type::vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ -1393,7 +1393,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func::(&[ + Type::func(&[ llvm_pointer_vec_ty, alignment_ty, mask_ty, @@ -1478,7 +1478,7 @@ fn generic_simd_intrinsic( // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { let i1 = Type::i1(bx.cx); - let i1xn = Type::vector::(i1, in_len as u64); + let i1xn = Type::vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ -1495,7 +1495,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func::(&[llvm_elem_vec_ty, + Type::func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1630,7 +1630,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // boolean reductions operate on vectors of i1s: let i1 = Type::i1(bx.cx); - let i1xn = Type::vector::(i1, in_len as u64); + let i1xn = Type::vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 80f7ab9f0b83f..4515f85d9c21e 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -17,7 +17,7 @@ use rustc_data_structures::sync::Lrc; use base; use common::{CodegenCx, C_undef, C_usize}; use builder::{Builder, MemFlags}; -use value::{Value, ValueTrait}; +use value::Value; use type_of::LayoutLlvmExt; use type_::Type; use glue; @@ -64,7 +64,7 @@ pub struct OperandRef<'tcx, V> { pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'tcx, &'ll Value> where Value : ValueTrait { +impl fmt::Debug for 
OperandRef<'tcx, &'ll Value> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } @@ -280,10 +280,8 @@ impl OperandValue<&'ll Value> { } } -impl<'a, 'll: 'a, 'tcx: 'll, Value : ?Sized> OperandValue<&'ll Value> where - Value : ValueTrait, - Builder<'a, 'll, 'tcx, &'ll Value>: - BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> +impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> where + Builder<'a, 'll, 'tcx, &'ll Value>: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> { pub fn nontemporal_store( self, diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index e4e4eca3fbbdd..cfbd1b766674c 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,7 +16,7 @@ use llvm; use llvm::{Bool, False, True, TypeKind}; use context::CodegenCx; -use value::{Value, ValueTrait}; +use value::Value; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; @@ -41,143 +41,143 @@ impl fmt::Debug for Type { } impl Type { - pub fn void( + pub fn void( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(cx.llcx) } } - pub fn metadata( + pub fn metadata( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMRustMetadataTypeInContext(cx.llcx) } } - pub fn i1( + pub fn i1( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(cx.llcx) } } - pub fn i8( + pub fn i8( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMInt8TypeInContext(cx.llcx) } } - pub fn i8_llcx(llcx: &llvm::Context) -> &Type where Value : ValueTrait { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { unsafe { llvm::LLVMInt8TypeInContext(llcx) } } - pub fn i16( - cx: &CodegenCx<'ll, '_, &'ll 
Value>) -> &'ll Type where Value : ValueTrait { + pub fn i16( + cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { unsafe { llvm::LLVMInt16TypeInContext(cx.llcx) } } - pub fn i32( + pub fn i32( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMInt32TypeInContext(cx.llcx) } } - pub fn i64( + pub fn i64( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMInt64TypeInContext(cx.llcx) } } - pub fn i128( + pub fn i128( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, 128) } } // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix( + pub fn ix( cx: &CodegenCx<'ll, '_, &'ll Value>, num_bits: u64 - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) } } // Creates an integer type with the given number of bits, e.g. 
i24 - pub fn ix_llcx( + pub fn ix_llcx( llcx: &llvm::Context, num_bits: u64 - ) -> &Type where Value : ValueTrait { + ) -> &Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn f32( + pub fn f32( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMFloatTypeInContext(cx.llcx) } } - pub fn f64( + pub fn f64( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMDoubleTypeInContext(cx.llcx) } } - pub fn bool( + pub fn bool( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { Type::i8(cx) } - pub fn char( + pub fn char( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { Type::i32(cx) } - pub fn i8p( + pub fn i8p( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { Type::i8(cx).ptr_to() } - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type where Value : ValueTrait { - Type::i8_llcx::(llcx).ptr_to() + pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { + Type::i8_llcx(llcx).ptr_to() } - pub fn isize( + pub fn isize( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { cx.isize_ty } - pub fn c_int( + pub fn c_int( cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { match &cx.tcx.sess.target.target.target_c_int_width[..] 
{ "16" => Type::i16(cx), "32" => Type::i32(cx), @@ -186,10 +186,10 @@ impl Type { } } - pub fn int_from_ty( + pub fn int_from_ty( cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::IntTy - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { match t { ast::IntTy::Isize => cx.isize_ty, ast::IntTy::I8 => Type::i8(cx), @@ -200,10 +200,10 @@ impl Type { } } - pub fn uint_from_ty( + pub fn uint_from_ty( cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::UintTy - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { match t { ast::UintTy::Usize => cx.isize_ty, ast::UintTy::U8 => Type::i8(cx), @@ -214,41 +214,41 @@ impl Type { } } - pub fn float_from_ty( + pub fn float_from_ty( cx: &CodegenCx<'ll, '_, &'ll Value>, t: ast::FloatTy - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { match t { ast::FloatTy::F32 => Type::f32(cx), ast::FloatTy::F64 => Type::f64(cx), } } - pub fn func( + pub fn func( args: &[&'ll Type], ret: &'ll Type - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } } - pub fn variadic_func( + pub fn variadic_func( args: &[&'ll Type], ret: &'ll Type - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - pub fn struct_( + pub fn struct_( cx: &CodegenCx<'ll, '_, &'ll Value>, els: &[&'ll Type], packed: bool - ) -> &'ll Type where Value : ValueTrait { + ) -> &'ll Type { unsafe { llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), els.len() as c_uint, @@ -264,13 +264,13 @@ impl Type { } - pub fn array(ty: &Type, len: u64) -> &Type where Value : ValueTrait { + pub fn array(ty: &Type, len: u64) -> &Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - pub fn vector(ty: &Type, len: u64) -> &Type where Value : ValueTrait { + pub fn vector(ty: &Type, len: u64) -> &Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } @@ -365,7 +365,7 @@ impl Type { let size = 
size.bytes(); let unit_size = unit.size().bytes(); assert_eq!(size % unit_size, 0); - Type::array::(Type::from_integer(cx, unit), size / unit_size) + Type::array(Type::from_integer(cx, unit), size / unit_size) } pub fn x86_mmx(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index ebd4aec1a3542..e798f4e73f7f7 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -41,7 +41,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, return Type::x86_mmx(cx) } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector::(element, count); + return Type::vector(element, count); } } layout::Abi::ScalarPair(..) => { @@ -94,7 +94,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, } } layout::FieldPlacement::Array { count, .. } => { - Type::array::(layout.field(cx, 0).llvm_type(cx), count) + Type::array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. 
} => { match name { diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index b405a72724ef1..4bf5b09baa629 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -15,26 +15,12 @@ use llvm; use std::fmt; use std::hash::{Hash, Hasher}; -pub trait ValueTrait : fmt::Debug { - fn to_llvm(&self) -> &llvm::Value; - fn of_llvm(&llvm::Value) -> &Self; -} - impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ } } -impl ValueTrait for Value { - fn to_llvm(&self) -> &llvm::Value { - &self - } - fn of_llvm(v: &llvm::Value) -> &Self { - &v - } -} - impl Eq for Value {} impl Hash for Value { From 234f53747ceee8d97ec136992b55be3dec085a30 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 22 Aug 2018 17:52:15 +0200 Subject: [PATCH 14/76] Line too long split --- src/librustc_codegen_llvm/builder.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 8239e746f4d26..4e284bda5a0c2 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -489,7 +489,12 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn atomic_load(&self, ptr: &'ll Value, order: traits::AtomicOrdering, align: Align) -> &'ll Value { + fn atomic_load( + &self, + ptr: &'ll Value, + order: traits::AtomicOrdering, + align: Align + ) -> &'ll Value { self.count_insn("load.atomic"); unsafe { let load = llvm::LLVMRustBuildAtomicLoad( From 9be7a5eda3afc768b037696a6b47ec8b43c922c4 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 22 Aug 2018 18:57:31 +0200 Subject: [PATCH 15/76] Use associated types instead of type parameters inside the BuilderMethods trait --- src/librustc_codegen_llvm/base.rs | 16 +- src/librustc_codegen_llvm/builder.rs | 334 +++++++++++----------- src/librustc_codegen_llvm/mir/operand.rs | 9 +- src/librustc_codegen_llvm/traits.rs 
| 337 +++++++++++------------ 4 files changed, 346 insertions(+), 350 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 16de4b6821aa2..8e5667ace5b21 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -76,7 +76,6 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; use traits::{IntPredicate, RealPredicate, BuilderMethods}; -use llvm::BasicBlock; use std::any::Any; use std::ffi::CString; @@ -391,9 +390,8 @@ pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { bx.call(assume_intrinsic, &[val], None); } -pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, - Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( - bx: &Builder, +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( + bx: &Builder<'_ ,'ll, '_, &'ll Value>, val: &'ll Value ) -> &'ll Value { if val_ty(val) == Type::i1(bx.cx()) { @@ -425,9 +423,8 @@ pub fn to_immediate_scalar( val } -pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, - Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( - bx: &Builder, +pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( + bx: &Builder<'_ ,'ll, '_, &'ll Value>, dst: &'ll Value, src: &'ll Value, n_bytes: &'ll Value, @@ -453,9 +450,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, - Builder: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>>( - bx: &Builder, +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( + bx: &Builder<'_ ,'ll, '_, &'ll Value>, dst: &'ll Value, src: &'ll Value, layout: TyLayout<'tcx>, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 4e284bda5a0c2..6c766dfa17fc4 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,11 +9,10 @@ // except according to those terms. 
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{False, OperandBundleDef}; -use llvm::{self, BasicBlock}; +use llvm::{self, False, OperandBundleDef}; use common::*; -use type_::Type; -use value::Value; +use type_; +use value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; @@ -56,11 +55,16 @@ bitflags! { } } -impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> - for Builder<'a, 'll, 'tcx, &'ll Value> { +impl BuilderMethods<'a, 'll, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll value::Value> { + + type Value = &'ll value::Value; + type BasicBlock = &'ll llvm::BasicBlock; + type Type = &'ll type_::Type; + fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, - llfn: &'ll Value, + cx: &'a CodegenCx<'ll, 'tcx, Self::Value>, + llfn: Self::Value, name: &'b str ) -> Self { let bx = Builder::with_cx(cx); @@ -76,7 +80,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> bx } - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, Self::Value>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -99,13 +103,13 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> self.cx.tcx } - fn llfn(&self) -> &'ll Value { + fn llfn(&self) -> Self::Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - fn llbb(&self) -> &'ll BasicBlock { + fn llbb(&self) -> Self::BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } @@ -124,20 +128,20 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&self, value: Self::Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&self, llbb: Self::BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&self, llbb: Self::BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } @@ -150,14 +154,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn ret(&self, v: &'ll Value) { + fn ret(&self, v: Self::Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - fn br(&self, dest: &'ll BasicBlock) { + fn br(&self, dest: Self::BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); @@ -166,9 +170,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn cond_br( &self, - cond: &'ll Value, - then_llbb: &'ll BasicBlock, - else_llbb: &'ll BasicBlock, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, ) { self.count_insn("condbr"); unsafe { @@ -178,21 +182,21 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn switch( &self, - v: &'ll Value, - else_llbb: &'ll BasicBlock, + v: Self::Value, + else_llbb: Self::BasicBlock, num_cases: usize, - ) -> &'ll Value { + ) -> Self::Value { unsafe { 
llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) } } fn invoke(&self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + bundle: Option<&traits::OperandBundleDef<'ll, Self::Value>>) -> Self::Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -222,21 +226,21 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } /* Arithmetic */ - fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -245,21 +249,21 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { 
self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -268,21 +272,21 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -292,42 +296,42 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } - fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { 
+ fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -336,28 +340,28 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -366,70 +370,70 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - fn ashr(&self, lhs: 
&'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&self, v: Self::Value) -> Self::Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&self, v: Self::Value) -> Self::Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&self, v: Self::Value) -> Self::Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value { let bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -437,7 +441,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> bx.dynamic_alloca(ty, name, align) } - fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value { 
self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -453,10 +457,10 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn array_alloca(&self, - ty: &'ll Type, - len: &'ll Value, + ty: Self::Type, + len: Self::Value, name: &str, - align: Align) -> &'ll Value { + align: Align) -> Self::Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -471,7 +475,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&self, ptr: Self::Value, align: Align) -> Self::Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -480,7 +484,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&self, ptr: Self::Value) -> Self::Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -491,10 +495,10 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn atomic_load( &self, - ptr: &'ll Value, + ptr: Self::Value, order: traits::AtomicOrdering, align: Align - ) -> &'ll Value { + ) -> Self::Value { self.count_insn("load.atomic"); unsafe { let load = llvm::LLVMRustBuildAtomicLoad( @@ -535,18 +539,18 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&self, load: Self::Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, + fn atomic_store(&self, val: Self::Value, ptr: Self::Value, order: traits::AtomicOrdering, align: 
Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); @@ -566,11 +570,11 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn store_with_flags( &self, - val: &'ll Value, - ptr: &'ll Value, + val: Self::Value, + ptr: Self::Value, align: Align, flags: MemFlags, - ) -> &'ll Value { + ) -> Self::Value { debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); self.count_insn("store"); let ptr = self.check_store(val, ptr); @@ -598,7 +602,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -606,7 +610,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -615,77 +619,77 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } /* Casts */ - fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn 
fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) @@ -693,14 +697,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, 
BasicBlock> } - fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value { self.count_insn("intcast"); unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("pointercast"); unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) @@ -708,7 +712,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } /* Comparisons */ - fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&self, op: traits::IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("icmp"); let op = llvm::IntPredicate::from_generic(op); unsafe { @@ -716,7 +720,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&self, op: traits::RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -724,14 +728,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } /* Miscellaneous instructions */ - fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + fn empty_phi(&self, ty: Self::Type) -> Self::Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -744,9 +748,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn 
inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: &'ll Type, + inputs: &[Self::Value], output: Self::Type, volatile: bool, alignstack: bool, - dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { + dia: syntax::ast::AsmDialect) -> Option { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -760,7 +764,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func(&argtys[..], output); + let fty = type_::Type::func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); @@ -776,14 +780,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -792,10 +796,10 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn select( - &self, cond: &'ll Value, - then_val: &'ll Value, - else_val: &'ll Value, - ) -> &'ll Value { + &self, cond: Self::Value, + then_val: Self::Value, + else_val: Self::Value, + ) -> Self::Value { self.count_insn("select"); unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) @@ -803,14 +807,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } #[allow(dead_code)] - fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value { 
self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) @@ -818,34 +822,34 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn insert_element( - &self, vec: &'ll Value, - elt: &'ll Value, - idx: &'ll Value, - ) -> &'ll Value { + &self, vec: Self::Value, + elt: Self::Value, + idx: Self::Value, + ) -> Self::Value { self.count_insn("insertelement"); unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) } } - fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value { unsafe { let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); + let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) } } - fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -857,7 +861,7 @@ impl BuilderMethods<'a, 'll, 'tcx, 
Value, BasicBlock> instr } } - fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -869,56 +873,56 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> instr } } - fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.add"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") } } - fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.mul"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") } } - fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.and"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") } } - fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.or"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") } } - fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.xor"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceXor is 
not available in LLVM version < 5.0") } } - fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fmin"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") } } - fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fmax"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0") } } - fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) @@ -927,7 +931,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> instr } } - fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) @@ -936,14 +940,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> instr } } - fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value { self.count_insn("vector.reduce.min"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") } } - fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&self, src: Self::Value, is_signed: 
bool) -> Self::Value { self.count_insn("vector.reduce.max"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); @@ -951,7 +955,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -959,8 +963,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, - idx: u64) -> &'ll Value { + fn insert_value(&self, agg_val: Self::Value, elt: Self::Value, + idx: u64) -> Self::Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -969,8 +973,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, - num_clauses: usize) -> &'ll Value { + fn landing_pad(&self, ty: Self::Type, pers_fn: Self::Value, + num_clauses: usize) -> Self::Value { self.count_insn("landingpad"); unsafe { llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, @@ -978,20 +982,20 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&self, landing_pad: Self::Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&self, exn: Self::Value) -> Self::Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) @@ -999,8 +1003,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn cleanup_pad(&self, - parent: Option<&'ll Value>, - args: &[&'ll Value]) -> &'ll Value { + 
parent: Option, + args: &[Self::Value]) -> Self::Value { self.count_insn("cleanuppad"); let name = const_cstr!("cleanuppad"); let ret = unsafe { @@ -1014,9 +1018,9 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn cleanup_ret( - &self, cleanup: &'ll Value, - unwind: Option<&'ll BasicBlock>, - ) -> &'ll Value { + &self, cleanup: Self::Value, + unwind: Option, + ) -> Self::Value { self.count_insn("cleanupret"); let ret = unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) @@ -1025,8 +1029,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } fn catch_pad(&self, - parent: &'ll Value, - args: &[&'ll Value]) -> &'ll Value { + parent: Self::Value, + args: &[Self::Value]) -> Self::Value { self.count_insn("catchpad"); let name = const_cstr!("catchpad"); let ret = unsafe { @@ -1037,7 +1041,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> ret.expect("LLVM does not have support for catchpad") } - fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&self, pad: Self::Value, unwind: Self::BasicBlock) -> Self::Value { self.count_insn("catchret"); let ret = unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) @@ -1047,10 +1051,10 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn catch_switch( &self, - parent: Option<&'ll Value>, - unwind: Option<&'ll BasicBlock>, + parent: Option, + unwind: Option, num_handlers: usize, - ) -> &'ll Value { + ) -> Self::Value { self.count_insn("catchswitch"); let name = const_cstr!("catchswitch"); let ret = unsafe { @@ -1061,13 +1065,13 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> ret.expect("LLVM does not have support for catchswitch") } - fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - fn set_personality_fn(&self, personality: &'ll Value) { + fn 
set_personality_fn(&self, personality: Self::Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } @@ -1076,13 +1080,13 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> // Atomic Operations fn atomic_cmpxchg( &self, - dst: &'ll Value, - cmp: &'ll Value, - src: &'ll Value, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, order: traits::AtomicOrdering, failure_order: traits::AtomicOrdering, weak: bool, - ) -> &'ll Value { + ) -> Self::Value { let weak = if weak { llvm::True } else { llvm::False }; unsafe { llvm::LLVMRustBuildAtomicCmpXchg( @@ -1099,10 +1103,10 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> fn atomic_rmw( &self, op: traits::AtomicRmwBinOp, - dst: &'ll Value, - src: &'ll Value, + dst: Self::Value, + src: Self::Value, order: traits::AtomicOrdering, - ) -> &'ll Value { + ) -> Self::Value { unsafe { llvm::LLVMBuildAtomicRMW( self.llbuilder, @@ -1124,20 +1128,20 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - fn set_invariant_load(&self, load: &'ll Value) { + fn set_invariant_load(&self, load: Self::Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); @@ -1146,8 +1150,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> /// Returns the ptr value that should be used for storing `val`. 
fn check_store<'b>(&self, - val: &'ll Value, - ptr: &'ll Value) -> &'ll Value { + val: Self::Value, + ptr: Self::Value) -> Self::Value { let dest_ptr_ty = val_ty(ptr); let stored_ty = val_ty(val); let stored_ptr_ty = stored_ty.ptr_to(); @@ -1167,8 +1171,8 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> /// Returns the args that should be used for a call to `llfn`. fn check_call<'b>(&self, typ: &str, - llfn: &'ll Value, - args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { + llfn: Self::Value, + args: &'b [Self::Value]) -> Cow<'b, [Self::Value]> { let mut fn_ty = val_ty(llfn); // Strip off pointers while fn_ty.kind() == llvm::TypeKind::Pointer { @@ -1207,11 +1211,11 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> Cow::Owned(casted_args) } - fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&self, ptr: Self::Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&self, ptr: Self::Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } @@ -1223,7 +1227,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> /// /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
- fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1235,12 +1239,12 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, Type::i8p(self.cx)); + let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx)); self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); } - fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + fn call(&self, llfn: Self::Value, args: &[Self::Value], + bundle: Option<&traits::OperandBundleDef<'ll, Self::Value>>) -> Self::Value { self.count_insn("call"); debug!("Call {:?} with args ({:?})", @@ -1261,14 +1265,14 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { self.count_insn("zext"); unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) } } - fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { + fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value { self.count_insn("structgep"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -1276,7 +1280,7 @@ impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> } } - fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> { + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, Self::Value> { &self.cx } } diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 4515f85d9c21e..050f9a1990123 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -23,7 +23,6 @@ use type_::Type; use glue; use traits::BuilderMethods; -use llvm::BasicBlock; use std::fmt; @@ -280,9 +279,7 @@ 
impl OperandValue<&'ll Value> { } } -impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> where - Builder<'a, 'll, 'tcx, &'ll Value>: BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock> -{ +impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> { pub fn nontemporal_store( self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, @@ -291,9 +288,9 @@ impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> where self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags>( + fn store_with_flags( self, - bx: &Builder, + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>, flags: MemFlags, ) { diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/traits.rs index ed3352ca4493b..02efcbe8f8d42 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/traits.rs @@ -9,7 +9,6 @@ // except according to those terms. use common::*; -use type_::Type; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; @@ -103,257 +102,257 @@ pub enum SynchronizationScope { } -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll, - Value : ?Sized, - BasicBlock: ?Sized - > { +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { + type Value; + type BasicBlock; + type Type; fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, - llfn: &'ll Value, + cx: &'a CodegenCx<'ll, 'tcx, Self::Value>, + llfn: Self::Value, name: &'b str ) -> Self; - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self; + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, Self::Value>) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; fn sess(&self) -> &Session; - fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value>; + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, Self::Value>; fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn llfn(&self) -> &'ll Value; - fn llbb(&self) -> &'ll BasicBlock; + fn llfn(&self) -> Self::Value; + fn llbb(&self) -> Self::BasicBlock; fn count_insn(&self, category: &str); - fn set_value_name(&self, 
value: &'ll Value, name: &str); - fn position_at_end(&self, llbb: &'ll BasicBlock); - fn position_at_start(&self, llbb: &'ll BasicBlock); + fn set_value_name(&self, value: Self::Value, name: &str); + fn position_at_end(&self, llbb: Self::BasicBlock); + fn position_at_start(&self, llbb: Self::BasicBlock); fn ret_void(&self); - fn ret(&self, v: &'ll Value); - fn br(&self, dest: &'ll BasicBlock); + fn ret(&self, v: Self::Value); + fn br(&self, dest: Self::BasicBlock); fn cond_br( &self, - cond: &'ll Value, - then_llbb: &'ll BasicBlock, - else_llbb: &'ll BasicBlock, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, ); fn switch( &self, - v: &'ll Value, - else_llbb: &'ll BasicBlock, + v: Self::Value, + else_llbb: Self::BasicBlock, num_cases: usize, - ) -> &'ll Value; + ) -> Self::Value; fn invoke( &self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll, &'ll Value>> - ) -> &'ll Value; + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + bundle: Option<&OperandBundleDef<'ll, Self::Value>> + ) -> Self::Value; fn unreachable(&self); - fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) 
-> &'ll Value; - fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn neg(&self, v: &'ll Value) -> &'ll Value; - fn fneg(&self, v: &'ll Value) -> &'ll Value; - fn not(&self, v: &'ll Value) -> &'ll Value; + fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> 
Self::Value; + fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn neg(&self, v: Self::Value) -> Self::Value; + fn fneg(&self, v: Self::Value) -> Self::Value; + fn not(&self, v: Self::Value) -> Self::Value; - fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value; - fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value; + fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value; fn array_alloca( &self, - ty: &'ll Type, - len: &'ll Value, + ty: Self::Type, + len: Self::Value, name: &str, align: Align - ) -> &'ll Value; + ) -> Self::Value; - fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value; - fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value; - fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value; + fn load(&self, ptr: Self::Value, align: Align) -> Self::Value; + fn volatile_load(&self, ptr: Self::Value) -> Self::Value; + fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, align: Align) -> Self::Value; - fn range_metadata(&self, load: &'ll Value, range: Range); - fn nonnull_metadata(&self, load: &'ll Value); + fn range_metadata(&self, load: Self::Value, range: 
Range); + fn nonnull_metadata(&self, load: Self::Value); - fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value; + fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; fn atomic_store( &self, - val: &'ll Value, - ptr: &'ll Value, + val: Self::Value, + ptr: Self::Value, order: AtomicOrdering, align: Align ); fn store_with_flags( &self, - val: &'ll Value, - ptr: &'ll Value, + val: Self::Value, + ptr: Self::Value, align: Align, flags: MemFlags, - ) -> &'ll Value; + ) -> Self::Value; - fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value; - fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value; - fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value; + fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value; - fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; - fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value; - fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> 
Self::Value; + fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn icmp(&self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fcmp(&self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn empty_phi(&self, ty: &'ll Type) -> &'ll Value; - fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value; + fn empty_phi(&self, ty: Self::Type) -> Self::Value; + fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value; fn inline_asm_call( &self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], - output: &'ll Type, + inputs: &[Self::Value], + output: Self::Type, volatile: bool, alignstack: bool, dia: AsmDialect - ) -> &'ll Value; + ) -> Self::Value; - fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; - fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value; + fn minnum(&self, lhs: Self::Value, rhs: 
Self::Value) -> Self::Value; + fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn select( - &self, cond: &'ll Value, - then_val: &'ll Value, - else_val: &'ll Value, - ) -> &'ll Value; + &self, cond: Self::Value, + then_val: Self::Value, + else_val: Self::Value, + ) -> Self::Value; - fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value; - fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value; + fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value; + fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value; fn insert_element( - &self, vec: &'ll Value, - elt: &'ll Value, - idx: &'ll Value, - ) -> &'ll Value; - fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value; - fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value; - fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value; - fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value; - fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value; - fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value; - fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value; - fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value; + &self, vec: Self::Value, + elt: Self::Value, + idx: Self::Value, + ) -> Self::Value; + fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: 
Self::Value) -> Self::Value; + fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value; + fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_add(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_and(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_or(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value; + fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value; + fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value; + fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value; fn insert_value( &self, - agg_val: &'ll Value, - elt: &'ll Value, + agg_val: Self::Value, + elt: Self::Value, idx: u64 - ) -> &'ll Value; + ) -> Self::Value; fn landing_pad( &self, - ty: &'ll Type, - pers_fn: &'ll Value, + ty: Self::Type, + pers_fn: Self::Value, num_clauses: usize - ) -> &'ll Value; - fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value); - fn set_cleanup(&self, landing_pad: &'ll Value); - fn resume(&self, exn: &'ll Value) -> &'ll Value; + ) -> Self::Value; + fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value); + fn set_cleanup(&self, landing_pad: Self::Value); + fn resume(&self, exn: Self::Value) -> Self::Value; fn cleanup_pad( &self, - parent: Option<&'ll Value>, - args: &[&'ll Value] - ) -> &'ll Value; + parent: Option, + args: &[Self::Value] + ) -> Self::Value; fn cleanup_ret( - &self, cleanup: &'ll Value, - unwind: Option<&'ll 
BasicBlock>, - ) -> &'ll Value; + &self, cleanup: Self::Value, + unwind: Option, + ) -> Self::Value; fn catch_pad( &self, - parent: &'ll Value, - args: &[&'ll Value] - ) -> &'ll Value; - fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value; + parent: Self::Value, + args: &[Self::Value] + ) -> Self::Value; + fn catch_ret(&self, pad: Self::Value, unwind: Self::BasicBlock) -> Self::Value; fn catch_switch( &self, - parent: Option<&'ll Value>, - unwind: Option<&'ll BasicBlock>, + parent: Option, + unwind: Option, num_handlers: usize, - ) -> &'ll Value; - fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock); - fn set_personality_fn(&self, personality: &'ll Value); + ) -> Self::Value; + fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock); + fn set_personality_fn(&self, personality: Self::Value); fn atomic_cmpxchg( &self, - dst: &'ll Value, - cmp: &'ll Value, - src: &'ll Value, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool, - ) -> &'ll Value; + ) -> Self::Value; fn atomic_rmw( &self, op: AtomicRmwBinOp, - dst: &'ll Value, - src: &'ll Value, + dst: Self::Value, + src: Self::Value, order: AtomicOrdering, - ) -> &'ll Value; + ) -> Self::Value; fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); - fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock); - fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock); - fn set_invariant_load(&self, load: &'ll Value); + fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); + fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); + fn set_invariant_load(&self, load: Self::Value); fn check_store( &self, - val: &'ll Value, - ptr: &'ll Value - ) -> &'ll Value; + val: Self::Value, + ptr: Self::Value + ) -> Self::Value; fn check_call<'b>( &self, typ: &str, - llfn: 
&'ll Value, - args: &'b [&'ll Value] - ) -> Cow<'b, [&'ll Value]>; - fn lifetime_start(&self, ptr: &'ll Value, size: Size); - fn lifetime_end(&self, ptr: &'ll Value, size: Size); + llfn: Self::Value, + args: &'b [Self::Value] + ) -> Cow<'b, [Self::Value]> where [Self::Value] : ToOwned; + fn lifetime_start(&self, ptr: Self::Value, size: Size); + fn lifetime_end(&self, ptr: Self::Value, size: Size); - fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size); + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size); - fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value; - fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value; + fn call(&self, llfn: Self::Value, args: &[Self::Value], + bundle: Option<&OperandBundleDef<'ll, Self::Value>>) -> Self::Value; + fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; } From 17e318983e79c217db6625894f209d952a290d3f Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 23 Aug 2018 10:10:53 +0200 Subject: [PATCH 16/76] Removed parasite yaml file and put explicit lifetimes --- .atom-build.yml | 1 - src/librustc_codegen_llvm/builder.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 .atom-build.yml diff --git a/.atom-build.yml b/.atom-build.yml deleted file mode 100644 index a31bc877c9f5f..0000000000000 --- a/.atom-build.yml +++ /dev/null @@ -1 +0,0 @@ -cmd: ./x.py -i check diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 6c766dfa17fc4..eec4b3b806e15 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -32,7 +32,7 @@ pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V : 'll> { pub cx: &'a CodegenCx<'ll, 'tcx, V>, } -impl Drop for Builder<'_, '_, '_, V> { +impl Drop for Builder<'a, 'll, 'tcx, V> { fn drop(&mut self) { unsafe { llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut 
_)); From 2680e1bacb4072caf22bcce3c6f0c9f9a92f3eae Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 23 Aug 2018 15:23:48 +0200 Subject: [PATCH 17/76] Use real type names rather than Self:: --- src/librustc_codegen_llvm/builder.rs | 318 +++++++++++++-------------- 1 file changed, 159 insertions(+), 159 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index eec4b3b806e15..29d1f4db59541 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,10 +9,10 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{self, False, OperandBundleDef}; +use llvm::{self, False, OperandBundleDef, BasicBlock}; use common::*; use type_; -use value; +use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; @@ -56,15 +56,15 @@ bitflags! { } impl BuilderMethods<'a, 'll, 'tcx> - for Builder<'a, 'll, 'tcx, &'ll value::Value> { + for Builder<'a, 'll, 'tcx, &'ll Value> { - type Value = &'ll value::Value; - type BasicBlock = &'ll llvm::BasicBlock; + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; type Type = &'ll type_::Type; fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx, Self::Value>, - llfn: Self::Value, + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + llfn: &'ll Value, name: &'b str ) -> Self { let bx = Builder::with_cx(cx); @@ -80,7 +80,7 @@ impl BuilderMethods<'a, 'll, 'tcx> bx } - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, Self::Value>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -103,13 +103,13 @@ impl BuilderMethods<'a, 'll, 'tcx> self.cx.tcx } - fn llfn(&self) -> Self::Value { + fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - fn llbb(&self) -> Self::BasicBlock { + fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } @@ -128,20 +128,20 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn set_value_name(&self, value: Self::Value, name: &str) { + fn set_value_name(&self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - fn position_at_end(&self, llbb: Self::BasicBlock) { + fn position_at_end(&self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - fn position_at_start(&self, llbb: Self::BasicBlock) { + fn position_at_start(&self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } @@ -154,14 +154,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn ret(&self, v: Self::Value) { + fn ret(&self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - fn br(&self, dest: Self::BasicBlock) { + fn br(&self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); @@ -170,9 +170,9 @@ impl BuilderMethods<'a, 'll, 'tcx> fn cond_br( &self, - cond: Self::Value, - then_llbb: Self::BasicBlock, - else_llbb: Self::BasicBlock, + cond: &'ll Value, + then_llbb: &'ll BasicBlock, + else_llbb: &'ll BasicBlock, ) { self.count_insn("condbr"); unsafe { @@ -182,21 +182,21 @@ impl BuilderMethods<'a, 'll, 'tcx> fn switch( &self, - v: Self::Value, - else_llbb: Self::BasicBlock, + v: &'ll Value, + else_llbb: &'ll BasicBlock, num_cases: usize, - ) -> Self::Value { + ) -> &'ll Value { unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) } } fn invoke(&self, - llfn: 
Self::Value, - args: &[Self::Value], - then: Self::BasicBlock, - catch: Self::BasicBlock, - bundle: Option<&traits::OperandBundleDef<'ll, Self::Value>>) -> Self::Value { + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -226,21 +226,21 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Arithmetic */ - fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -249,21 +249,21 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -272,21 +272,21 @@ impl 
BuilderMethods<'a, 'll, 'tcx> } } - fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -296,42 +296,42 @@ impl BuilderMethods<'a, 'll, 'tcx> } - fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - 
fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -340,28 +340,28 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -370,70 +370,70 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } 
} - fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - fn neg(&self, v: Self::Value) -> Self::Value { + fn neg(&self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - fn fneg(&self, v: Self::Value) -> Self::Value { + fn fneg(&self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - fn not(&self, v: Self::Value) -> Self::Value { + fn not(&self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value { + fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value { let bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -441,7 +441,7 @@ impl BuilderMethods<'a, 'll, 'tcx> bx.dynamic_alloca(ty, name, align) } - fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value { + fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -458,9 +458,9 @@ impl BuilderMethods<'a, 'll, 'tcx> fn array_alloca(&self, ty: Self::Type, - len: Self::Value, + len: &'ll Value, name: &str, - align: Align) -> Self::Value { 
+ align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -475,7 +475,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn load(&self, ptr: Self::Value, align: Align) -> Self::Value { + fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -484,7 +484,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn volatile_load(&self, ptr: Self::Value) -> Self::Value { + fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -495,10 +495,10 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_load( &self, - ptr: Self::Value, + ptr: &'ll Value, order: traits::AtomicOrdering, align: Align - ) -> Self::Value { + ) -> &'ll Value { self.count_insn("load.atomic"); unsafe { let load = llvm::LLVMRustBuildAtomicLoad( @@ -539,18 +539,18 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn nonnull_metadata(&self, load: Self::Value) { + fn nonnull_metadata(&self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value { + fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - fn atomic_store(&self, val: Self::Value, ptr: Self::Value, + fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, order: traits::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); @@ -570,11 +570,11 @@ impl BuilderMethods<'a, 'll, 'tcx> fn store_with_flags( &self, - val: Self::Value, - ptr: Self::Value, + val: &'ll Value, + ptr: &'ll Value, align: Align, flags: MemFlags, - ) -> Self::Value { + ) -> &'ll Value { debug!("Store {:?} -> {:?} ({:?})", val, ptr, 
flags); self.count_insn("store"); let ptr = self.check_store(val, ptr); @@ -602,7 +602,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value { + fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -610,7 +610,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value { + fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -619,77 +619,77 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Casts */ - fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn trunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn sext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn fptoui(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn fptosi(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn uitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn sitofp(&self, val: 
Self::Value, dest_ty: Self::Type) -> Self::Value { + fn sitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn fptrunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn fpext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn ptrtoint(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn inttoptr(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn bitcast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) @@ -697,14 +697,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } - fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value { + fn intcast(&self, val: &'ll Value, dest_ty: Self::Type, is_signed: bool) -> &'ll Value { self.count_insn("intcast"); unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn pointercast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value 
{ self.count_insn("pointercast"); unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) @@ -712,7 +712,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Comparisons */ - fn icmp(&self, op: traits::IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); let op = llvm::IntPredicate::from_generic(op); unsafe { @@ -720,7 +720,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn fcmp(&self, op: traits::RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -728,14 +728,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Miscellaneous instructions */ - fn empty_phi(&self, ty: Self::Type) -> Self::Value { + fn empty_phi(&self, ty: Self::Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value { + fn phi(&self, ty: Self::Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -748,9 +748,9 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[Self::Value], output: Self::Type, + inputs: &[&'ll Value], output: Self::Type, volatile: bool, alignstack: bool, - dia: syntax::ast::AsmDialect) -> Option { + dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -780,14 +780,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> 
&'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value { + fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -796,10 +796,10 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn select( - &self, cond: Self::Value, - then_val: Self::Value, - else_val: Self::Value, - ) -> Self::Value { + &self, cond: &'ll Value, + then_val: &'ll Value, + else_val: &'ll Value, + ) -> &'ll Value { self.count_insn("select"); unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) @@ -807,14 +807,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } #[allow(dead_code)] - fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value { + fn va_arg(&self, list: &'ll Value, ty: Self::Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value { + fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) @@ -822,24 +822,24 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn insert_element( - &self, vec: Self::Value, - elt: Self::Value, - idx: Self::Value, - ) -> Self::Value { + &self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, + ) -> &'ll Value { self.count_insn("insertelement"); unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) } } - fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value { + fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { 
llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value { + fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = val_ty(elt); let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); @@ -849,7 +849,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value { + fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -861,7 +861,7 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value { + fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -873,56 +873,56 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_add(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") } } - fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") } } - fn vector_reduce_and(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); 
instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") } } - fn vector_reduce_or(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") } } - fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") } } - fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") } } - fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0") } } - fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) @@ -931,7 +931,7 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value { + fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { 
let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) @@ -940,14 +940,14 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value { + fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") } } - fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value { + fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); @@ -955,7 +955,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value { + fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -963,8 +963,8 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn insert_value(&self, agg_val: Self::Value, elt: Self::Value, - idx: u64) -> Self::Value { + fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -973,8 +973,8 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn landing_pad(&self, ty: Self::Type, pers_fn: Self::Value, - num_clauses: usize) -> Self::Value { + fn landing_pad(&self, ty: Self::Type, pers_fn: &'ll Value, + num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, @@ -982,20 +982,20 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value) { + fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { 
llvm::LLVMAddClause(landing_pad, clause); } } - fn set_cleanup(&self, landing_pad: Self::Value) { + fn set_cleanup(&self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - fn resume(&self, exn: Self::Value) -> Self::Value { + fn resume(&self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) @@ -1003,8 +1003,8 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn cleanup_pad(&self, - parent: Option, - args: &[Self::Value]) -> Self::Value { + parent: Option<&'ll Value>, + args: &[&'ll Value]) -> &'ll Value { self.count_insn("cleanuppad"); let name = const_cstr!("cleanuppad"); let ret = unsafe { @@ -1018,9 +1018,9 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn cleanup_ret( - &self, cleanup: Self::Value, - unwind: Option, - ) -> Self::Value { + &self, cleanup: &'ll Value, + unwind: Option<&'ll BasicBlock>, + ) -> &'ll Value { self.count_insn("cleanupret"); let ret = unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) @@ -1029,8 +1029,8 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn catch_pad(&self, - parent: Self::Value, - args: &[Self::Value]) -> Self::Value { + parent: &'ll Value, + args: &[&'ll Value]) -> &'ll Value { self.count_insn("catchpad"); let name = const_cstr!("catchpad"); let ret = unsafe { @@ -1041,7 +1041,7 @@ impl BuilderMethods<'a, 'll, 'tcx> ret.expect("LLVM does not have support for catchpad") } - fn catch_ret(&self, pad: Self::Value, unwind: Self::BasicBlock) -> Self::Value { + fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { self.count_insn("catchret"); let ret = unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) @@ -1051,10 +1051,10 @@ impl BuilderMethods<'a, 'll, 'tcx> fn catch_switch( &self, - parent: Option, - unwind: Option, + parent: Option<&'ll Value>, + unwind: Option<&'ll BasicBlock>, num_handlers: usize, - ) -> Self::Value { + ) -> &'ll Value { 
self.count_insn("catchswitch"); let name = const_cstr!("catchswitch"); let ret = unsafe { @@ -1065,13 +1065,13 @@ impl BuilderMethods<'a, 'll, 'tcx> ret.expect("LLVM does not have support for catchswitch") } - fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock) { + fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - fn set_personality_fn(&self, personality: Self::Value) { + fn set_personality_fn(&self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } @@ -1080,13 +1080,13 @@ impl BuilderMethods<'a, 'll, 'tcx> // Atomic Operations fn atomic_cmpxchg( &self, - dst: Self::Value, - cmp: Self::Value, - src: Self::Value, + dst: &'ll Value, + cmp: &'ll Value, + src: &'ll Value, order: traits::AtomicOrdering, failure_order: traits::AtomicOrdering, weak: bool, - ) -> Self::Value { + ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; unsafe { llvm::LLVMRustBuildAtomicCmpXchg( @@ -1103,10 +1103,10 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_rmw( &self, op: traits::AtomicRmwBinOp, - dst: Self::Value, - src: Self::Value, + dst: &'ll Value, + src: &'ll Value, order: traits::AtomicOrdering, - ) -> Self::Value { + ) -> &'ll Value { unsafe { llvm::LLVMBuildAtomicRMW( self.llbuilder, @@ -1128,20 +1128,20 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock) { + fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock) { + fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - fn set_invariant_load(&self, load: Self::Value) { + fn 
set_invariant_load(&self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); @@ -1150,8 +1150,8 @@ impl BuilderMethods<'a, 'll, 'tcx> /// Returns the ptr value that should be used for storing `val`. fn check_store<'b>(&self, - val: Self::Value, - ptr: Self::Value) -> Self::Value { + val: &'ll Value, + ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = val_ty(ptr); let stored_ty = val_ty(val); let stored_ptr_ty = stored_ty.ptr_to(); @@ -1171,8 +1171,8 @@ impl BuilderMethods<'a, 'll, 'tcx> /// Returns the args that should be used for a call to `llfn`. fn check_call<'b>(&self, typ: &str, - llfn: Self::Value, - args: &'b [Self::Value]) -> Cow<'b, [Self::Value]> { + llfn: &'ll Value, + args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { let mut fn_ty = val_ty(llfn); // Strip off pointers while fn_ty.kind() == llvm::TypeKind::Pointer { @@ -1211,11 +1211,11 @@ impl BuilderMethods<'a, 'll, 'tcx> Cow::Owned(casted_args) } - fn lifetime_start(&self, ptr: Self::Value, size: Size) { + fn lifetime_start(&self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - fn lifetime_end(&self, ptr: Self::Value, size: Size) { + fn lifetime_end(&self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } @@ -1227,7 +1227,7 @@ impl BuilderMethods<'a, 'll, 'tcx> /// /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
- fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size) { + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1243,8 +1243,8 @@ impl BuilderMethods<'a, 'll, 'tcx> self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); } - fn call(&self, llfn: Self::Value, args: &[Self::Value], - bundle: Option<&traits::OperandBundleDef<'ll, Self::Value>>) -> Self::Value { + fn call(&self, llfn: &'ll Value, args: &[&'ll Value], + bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("call"); debug!("Call {:?} with args ({:?})", @@ -1265,14 +1265,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + fn zext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { self.count_insn("zext"); unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) } } - fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value { + fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("structgep"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -1280,7 +1280,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, Self::Value> { + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> { &self.cx } } From 2d8ccf2c9dbf704cd0d5d8504db4e8caf0df8eaf Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 23 Aug 2018 15:11:09 +0200 Subject: [PATCH 18/76] Fixed borrow-checker deficiency at stage 1 --- src/librustc_codegen_llvm/builder.rs | 71 ++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 29d1f4db59541..b28f8ab0516a0 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -204,17 +204,34 @@ impl BuilderMethods<'a, 'll, 'tcx> 
args); let args = self.check_call("invoke", llfn, args); - let bundle = bundle.map(|b| &*(OperandBundleDef::from_generic(b)).raw); - - unsafe { - llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()) + match bundle { + Some(b) => { + let llvm_bundle = OperandBundleDef::from_generic(b); + unsafe { + llvm::LLVMRustBuildInvoke( + self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + Some(&*(llvm_bundle.raw)), + noname() + ) + } + } + None => unsafe { + llvm::LLVMRustBuildInvoke( + self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + None, + noname() + ) + } } } @@ -1252,16 +1269,28 @@ impl BuilderMethods<'a, 'll, 'tcx> args); let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*(OperandBundleDef::from_generic(b)).raw); - - unsafe { - llvm::LLVMRustBuildCall( - self.llbuilder, - llfn, - args.as_ptr() as *const &llvm::Value, - args.len() as c_uint, - bundle, noname() - ) + match bundle { + Some(b) => { + let bundle = OperandBundleDef::from_generic(b); + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + Some(&*(bundle.raw)), noname() + ) + } + } + None => unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + None, noname() + ) + } } } From 28ee44af0d7f70c29d7d0fe2cb36a4b65e946eab Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 28 Aug 2018 11:11:01 +0200 Subject: [PATCH 19/76] New files and folders for traits Moved common enums to common --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/base.rs | 4 +- src/librustc_codegen_llvm/builder.rs | 26 ++--- src/librustc_codegen_llvm/common.rs | 83 +++++++++++++- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- src/librustc_codegen_llvm/debuginfo/mod.rs 
| 2 +- .../debuginfo/source_loc.rs | 2 +- src/librustc_codegen_llvm/glue.rs | 2 +- .../{traits.rs => interfaces/builder.rs} | 81 ------------- src/librustc_codegen_llvm/interfaces/mod.rs | 13 +++ src/librustc_codegen_llvm/intrinsic.rs | 4 +- src/librustc_codegen_llvm/lib.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 106 +++++++++--------- src/librustc_codegen_llvm/llvm/mod.rs | 4 +- src/librustc_codegen_llvm/meth.rs | 2 +- src/librustc_codegen_llvm/mir/block.rs | 4 +- src/librustc_codegen_llvm/mir/constant.rs | 2 +- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 4 +- src/librustc_codegen_llvm/mir/rvalue.rs | 7 +- 22 files changed, 187 insertions(+), 171 deletions(-) rename src/librustc_codegen_llvm/{traits.rs => interfaces/builder.rs} (89%) create mode 100644 src/librustc_codegen_llvm/interfaces/mod.rs diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 20b56be2482c9..6025b59482158 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -19,7 +19,7 @@ use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 3e2eb47a2ee83..8b7869f71556f 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -16,7 +16,7 @@ use builder::Builder; use value::Value; use rustc::hir; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use mir::place::PlaceRef; use mir::operand::OperandValue; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 8e5667ace5b21..ae0b8e28c7f6e 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -56,7 +56,7 @@ use callee; use common::{C_bool, 
C_bytes_in_context, C_i32, C_usize}; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, C_struct_in_context, C_array, val_ty}; +use common::{self, C_struct_in_context, C_array, val_ty, IntPredicate, RealPredicate}; use consts; use context::CodegenCx; use debuginfo; @@ -75,7 +75,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use traits::{IntPredicate, RealPredicate, BuilderMethods}; +use interfaces::BuilderMethods; use std::any::Any; use std::ffi::CString; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index b28f8ab0516a0..f3e74d4f94e0f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -10,7 +10,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; -use common::*; +use common::{self, *}; use type_; use value::Value; use libc::{c_uint, c_char}; @@ -18,7 +18,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use traits::{self, BuilderMethods}; +use interfaces::BuilderMethods; use syntax; use std::borrow::Cow; @@ -196,7 +196,7 @@ impl BuilderMethods<'a, 'll, 'tcx> args: &[&'ll Value], then: &'ll BasicBlock, catch: &'ll BasicBlock, - bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -513,7 +513,7 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_load( &self, ptr: &'ll Value, - order: traits::AtomicOrdering, + order: common::AtomicOrdering, align: Align ) -> &'ll Value { self.count_insn("load.atomic"); @@ -568,7 +568,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn 
atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: traits::AtomicOrdering, align: Align) { + order: common::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); @@ -729,7 +729,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Comparisons */ - fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); let op = llvm::IntPredicate::from_generic(op); unsafe { @@ -737,7 +737,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -1100,8 +1100,8 @@ impl BuilderMethods<'a, 'll, 'tcx> dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: traits::AtomicOrdering, - failure_order: traits::AtomicOrdering, + order: common::AtomicOrdering, + failure_order: common::AtomicOrdering, weak: bool, ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; @@ -1119,10 +1119,10 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_rmw( &self, - op: traits::AtomicRmwBinOp, + op: common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: traits::AtomicOrdering, + order: common::AtomicOrdering, ) -> &'ll Value { unsafe { llvm::LLVMBuildAtomicRMW( @@ -1135,7 +1135,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn atomic_fence(&self, order: traits::AtomicOrdering, scope: traits::SynchronizationScope) { + fn atomic_fence(&self, order: common::AtomicOrdering, scope: common::SynchronizationScope) { unsafe { llvm::LLVMRustBuildAtomicFence( self.llbuilder, @@ -1261,7 +1261,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn call(&self, llfn: &'ll Value, args: &[&'ll Value], 
- bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { self.count_insn("call"); debug!("Call {:?} with args ({:?})", diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 7c24f9fe42191..2bd758e0d9623 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -28,7 +28,7 @@ use value::Value; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; -use traits::{BuilderMethods, OperandBundleDef}; +use interfaces::BuilderMethods; use libc::{c_uint, c_char}; use std::iter; @@ -51,6 +51,87 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) } +pub struct OperandBundleDef<'a, Value : 'a> { + pub name: &'a str, + pub val: Value +} + +impl OperandBundleDef<'ll, &'ll Value> { + pub fn new(name: &'ll str, val: &'ll Value) -> Self { + OperandBundleDef { + name, + val + } + } +} + +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE +} + +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin +} + +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. 
+ #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 4a546f542977b..047d8bbdd3245 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -18,7 +18,7 @@ use declare; use rustc::session::config::DebugInfo; use type_::Type; use value::Value; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use syntax::attr; diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 455e8d998623a..87ffde0934be5 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -45,7 +45,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; use rustc::ty::layout::{self, LayoutOf}; -use traits::BuilderMethods; +use interfaces::BuilderMethods; pub mod gdb; mod utils; diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index f4feabd2a92b2..c6698f2451a12 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -17,7 +17,7 @@ use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use libc::c_uint; use syntax_pos::{Span, Pos}; diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index a5af841d9bdba..7c225c5a1c7ee 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -20,7 +20,7 @@ use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -use traits::{IntPredicate,BuilderMethods}; +use interfaces::BuilderMethods; pub fn size_and_align_of_dst( bx: &Builder<'_, 'll, 'tcx, &'ll 
Value>, diff --git a/src/librustc_codegen_llvm/traits.rs b/src/librustc_codegen_llvm/interfaces/builder.rs similarity index 89% rename from src/librustc_codegen_llvm/traits.rs rename to src/librustc_codegen_llvm/interfaces/builder.rs index 02efcbe8f8d42..3de6dc7cbe718 100644 --- a/src/librustc_codegen_llvm/traits.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -14,92 +14,11 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::Session; use builder::MemFlags; -use value::Value; use std::borrow::Cow; use std::ops::Range; use syntax::ast::AsmDialect; -pub struct OperandBundleDef<'a, Value : 'a> { - pub name: &'a str, - pub val: Value -} - -impl OperandBundleDef<'ll, &'ll Value> { - pub fn new(name: &'ll str, val: &'ll Value) -> Self { - OperandBundleDef { - name, - val - } - } -} - -pub enum IntPredicate { - IntEQ, - IntNE, - IntUGT, - IntUGE, - IntULT, - IntULE, - IntSGT, - IntSGE, - IntSLT, - IntSLE -} - -#[allow(dead_code)] -pub enum RealPredicate { - RealPredicateFalse, - RealOEQ, - RealOGT, - RealOGE, - RealOLT, - RealOLE, - RealONE, - RealORD, - RealUNO, - RealUEQ, - RealUGT, - RealUGE, - RealULT, - RealULE, - RealUNE, - RealPredicateTrue -} - -pub enum AtomicRmwBinOp { - AtomicXchg, - AtomicAdd, - AtomicSub, - AtomicAnd, - AtomicNand, - AtomicOr, - AtomicXor, - AtomicMax, - AtomicMin, - AtomicUMax, - AtomicUMin -} - -pub enum AtomicOrdering { - #[allow(dead_code)] - NotAtomic, - Unordered, - Monotonic, - // Consume, // Not specified yet. - Acquire, - Release, - AcquireRelease, - SequentiallyConsistent, -} - -pub enum SynchronizationScope { - // FIXME: figure out if this variant is needed at all. 
- #[allow(dead_code)] - Other, - SingleThread, - CrossThread, -} pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs new file mode 100644 index 0000000000000..d0cd8e6a696ed --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -0,0 +1,13 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod builder; + +pub use self::builder::BuilderMethods; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index a1f047f638e5a..c8ec4fb2144fb 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -30,7 +30,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use traits::{BuilderMethods, AtomicRmwBinOp, SynchronizationScope}; +use interfaces::BuilderMethods; use rustc::session::Session; use syntax_pos::Span; @@ -416,7 +416,7 @@ pub fn codegen_intrinsic_call( // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst name if name.starts_with("atomic_") => { - use traits::AtomicOrdering::*; + use self::AtomicOrdering::*; let split: Vec<&str> = name.split('_').collect(); diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 185973c090555..57b4f8dfbbcb6 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -106,7 +106,7 @@ mod back { pub mod wasm; } -mod traits; +mod interfaces; mod abi; mod allocator; diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index cf723e184f623..4c04f7c06090a 100644 --- 
a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,7 +19,7 @@ use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; -use traits; +use common; use syntax; use super::RustString; @@ -144,18 +144,18 @@ pub enum IntPredicate { } impl IntPredicate { - pub fn from_generic(intpre: traits::IntPredicate) -> Self { + pub fn from_generic(intpre: common::IntPredicate) -> Self { match intpre { - traits::IntPredicate::IntEQ => IntPredicate::IntEQ, - traits::IntPredicate::IntNE => IntPredicate::IntNE, - traits::IntPredicate::IntUGT => IntPredicate::IntUGT, - traits::IntPredicate::IntUGE => IntPredicate::IntUGE, - traits::IntPredicate::IntULT => IntPredicate::IntULT, - traits::IntPredicate::IntULE => IntPredicate::IntULE, - traits::IntPredicate::IntSGT => IntPredicate::IntSGT, - traits::IntPredicate::IntSGE => IntPredicate::IntSGE, - traits::IntPredicate::IntSLT => IntPredicate::IntSLT, - traits::IntPredicate::IntSLE => IntPredicate::IntSLE, + common::IntPredicate::IntEQ => IntPredicate::IntEQ, + common::IntPredicate::IntNE => IntPredicate::IntNE, + common::IntPredicate::IntUGT => IntPredicate::IntUGT, + common::IntPredicate::IntUGE => IntPredicate::IntUGE, + common::IntPredicate::IntULT => IntPredicate::IntULT, + common::IntPredicate::IntULE => IntPredicate::IntULE, + common::IntPredicate::IntSGT => IntPredicate::IntSGT, + common::IntPredicate::IntSGE => IntPredicate::IntSGE, + common::IntPredicate::IntSLT => IntPredicate::IntSLT, + common::IntPredicate::IntSLE => IntPredicate::IntSLE, } } } @@ -183,24 +183,24 @@ pub enum RealPredicate { } impl RealPredicate { - pub fn from_generic(realpred: traits::RealPredicate) -> Self { + pub fn from_generic(realpred: common::RealPredicate) -> Self { match realpred { - traits::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, - traits::RealPredicate::RealOEQ => RealPredicate::RealOEQ, - traits::RealPredicate::RealOGT => 
RealPredicate::RealOGT, - traits::RealPredicate::RealOGE => RealPredicate::RealOGE, - traits::RealPredicate::RealOLT => RealPredicate::RealOLT, - traits::RealPredicate::RealOLE => RealPredicate::RealOLE, - traits::RealPredicate::RealONE => RealPredicate::RealONE, - traits::RealPredicate::RealORD => RealPredicate::RealORD, - traits::RealPredicate::RealUNO => RealPredicate::RealUNO, - traits::RealPredicate::RealUEQ => RealPredicate::RealUEQ, - traits::RealPredicate::RealUGT => RealPredicate::RealUGT, - traits::RealPredicate::RealUGE => RealPredicate::RealUGE, - traits::RealPredicate::RealULT => RealPredicate::RealULT, - traits::RealPredicate::RealULE => RealPredicate::RealULE, - traits::RealPredicate::RealUNE => RealPredicate::RealUNE, - traits::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue + common::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, + common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + common::RealPredicate::RealOGT => RealPredicate::RealOGT, + common::RealPredicate::RealOGE => RealPredicate::RealOGE, + common::RealPredicate::RealOLT => RealPredicate::RealOLT, + common::RealPredicate::RealOLE => RealPredicate::RealOLE, + common::RealPredicate::RealONE => RealPredicate::RealONE, + common::RealPredicate::RealORD => RealPredicate::RealORD, + common::RealPredicate::RealUNO => RealPredicate::RealUNO, + common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + common::RealPredicate::RealUGT => RealPredicate::RealUGT, + common::RealPredicate::RealUGE => RealPredicate::RealUGE, + common::RealPredicate::RealULT => RealPredicate::RealULT, + common::RealPredicate::RealULE => RealPredicate::RealULE, + common::RealPredicate::RealUNE => RealPredicate::RealUNE, + common::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue } } } @@ -246,19 +246,19 @@ pub enum AtomicRmwBinOp { } impl AtomicRmwBinOp { - pub fn from_generic(op : traits::AtomicRmwBinOp) -> Self { + pub fn from_generic(op : 
common::AtomicRmwBinOp) -> Self { match op { - traits::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, - traits::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, - traits::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, - traits::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, - traits::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, - traits::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, - traits::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, - traits::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, - traits::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, - traits::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, - traits::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin } } } @@ -279,15 +279,15 @@ pub enum AtomicOrdering { } impl AtomicOrdering { - pub fn from_generic(ao : traits::AtomicOrdering) -> Self { + pub fn from_generic(ao : common::AtomicOrdering) -> Self { match ao { - traits::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, - traits::AtomicOrdering::Unordered => AtomicOrdering::Unordered, - traits::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, - traits::AtomicOrdering::Acquire => AtomicOrdering::Acquire, - 
traits::AtomicOrdering::Release => AtomicOrdering::Release, - traits::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, - traits::AtomicOrdering::SequentiallyConsistent => + common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + common::AtomicOrdering::Release => AtomicOrdering::Release, + common::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, + common::AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent } } @@ -306,11 +306,11 @@ pub enum SynchronizationScope { } impl SynchronizationScope { - pub fn from_generic(sc : traits::SynchronizationScope) -> Self { + pub fn from_generic(sc : common::SynchronizationScope) -> Self { match sc { - traits::SynchronizationScope::Other => SynchronizationScope::Other, - traits::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, - traits::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, + common::SynchronizationScope::Other => SynchronizationScope::Other, + common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, + common::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, } } } diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index c01a625806bd4..63a74cb6d6d50 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -28,7 +28,7 @@ use std::ffi::CStr; use std::cell::RefCell; use libc::{self, c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; -use traits; +use common; pub mod archive_ro; pub mod diagnostic; @@ -273,7 +273,7 @@ impl OperandBundleDef<'a> { OperandBundleDef { raw: def } } - pub fn from_generic(bundle : &traits::OperandBundleDef<'a, &'a Value>) -> Self { + pub 
fn from_generic(bundle : &common::OperandBundleDef<'a, &'a Value>) -> Self { Self::new(bundle.name, &[bundle.val]) } } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 96285d3c28648..8db57797eb682 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -17,7 +17,7 @@ use monomorphize; use type_::Type; use value::Value; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 53e5841df29ae..3f78d45c0a318 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -18,7 +18,7 @@ use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; use base; use callee; use builder::{Builder, MemFlags}; -use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef}; +use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef, IntPredicate}; use consts; use meth; use monomorphize; @@ -26,7 +26,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use traits::{IntPredicate,BuilderMethods}; +use interfaces::BuilderMethods; use syntax::symbol::Symbol; use syntax_pos::Pos; diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index d0a5b320ecc36..992bcfa86b067 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -27,7 +27,7 @@ use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use super::super::callee; use super::FunctionCx; diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 5d318d32f7965..3490361f4397d 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ 
-25,7 +25,7 @@ use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; use type_::Type; use value::Value; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 050f9a1990123..13eddae6c3ac5 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -22,7 +22,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use glue; -use traits::BuilderMethods; +use interfaces::BuilderMethods; use std::fmt; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index b414be19b2c78..9f1da16976797 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -15,7 +15,7 @@ use rustc::mir; use rustc::mir::tcx::PlaceTy; use base; use builder::Builder; -use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big}; +use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big, IntPredicate}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -23,7 +23,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use traits::{IntPredicate,BuilderMethods}; +use interfaces::BuilderMethods; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 0439b577ba7ae..dd8b92afa7259 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -20,14 +20,17 @@ use base; use builder::Builder; use callee; use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big}; +use common::{ + C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, + C_uint, C_uint_big, IntPredicate, 
RealPredicate +}; use consts; use monomorphize; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use traits::{IntPredicate, RealPredicate, BuilderMethods}; +use interfaces::BuilderMethods; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; From 78fbb2d1f275d4cceb7b5b62a34b6851387ada81 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 28 Aug 2018 11:40:34 +0200 Subject: [PATCH 20/76] New Backend trait containing associated types --- src/librustc_codegen_llvm/builder.rs | 12 +++++++----- src/librustc_codegen_llvm/interfaces/backend.rs | 15 +++++++++++++++ src/librustc_codegen_llvm/interfaces/builder.rs | 6 ++---- src/librustc_codegen_llvm/interfaces/mod.rs | 2 ++ 4 files changed, 26 insertions(+), 9 deletions(-) create mode 100644 src/librustc_codegen_llvm/interfaces/backend.rs diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index f3e74d4f94e0f..81ff6f77a458f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -18,7 +18,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, Backend}; use syntax; use std::borrow::Cow; @@ -55,13 +55,15 @@ bitflags! 
{ } } +impl Backend for Builder<'a, 'll, 'tcx, &'ll Value> { + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; + type Type = &'ll type_::Type; +} + impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { - type Value = &'ll Value; - type BasicBlock = &'ll BasicBlock; - type Type = &'ll type_::Type; - fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs new file mode 100644 index 0000000000000..b2a6bf2dd8c0a --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -0,0 +1,15 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub trait Backend { + type Value; + type BasicBlock; + type Type; +} diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 3de6dc7cbe718..a0f6f749d27bd 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -14,6 +14,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::Session; use builder::MemFlags; +use super::backend::Backend; use std::borrow::Cow; use std::ops::Range; @@ -21,10 +22,7 @@ use syntax::ast::AsmDialect; -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { - type Value; - type BasicBlock; - type Type; +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : Backend { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, Self::Value>, diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index d0cd8e6a696ed..b9a356874ba96 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -9,5 +9,7 @@ // except according to those terms. 
mod builder; +mod backend; pub use self::builder::BuilderMethods; +pub use self::backend::Backend; From dfa4f77f8fd6e96fef0e12a6f746ed49017d42e7 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 28 Aug 2018 17:03:46 +0200 Subject: [PATCH 21/76] Traitification of common.rs methods --- src/librustc_codegen_llvm/abi.rs | 6 +- src/librustc_codegen_llvm/asm.rs | 6 +- src/librustc_codegen_llvm/back/link.rs | 2 +- src/librustc_codegen_llvm/back/write.rs | 12 +- src/librustc_codegen_llvm/base.rs | 35 +- src/librustc_codegen_llvm/builder.rs | 32 +- src/librustc_codegen_llvm/callee.rs | 5 +- src/librustc_codegen_llvm/common.rs | 359 +++++++++--------- src/librustc_codegen_llvm/consts.rs | 12 +- src/librustc_codegen_llvm/debuginfo/gdb.rs | 8 +- src/librustc_codegen_llvm/glue.rs | 23 +- .../interfaces/backend.rs | 1 + .../interfaces/common.rs | 61 +++ src/librustc_codegen_llvm/interfaces/mod.rs | 2 + src/librustc_codegen_llvm/intrinsic.rs | 86 +++-- src/librustc_codegen_llvm/llvm/ffi.rs | 2 +- src/librustc_codegen_llvm/meth.rs | 22 +- src/librustc_codegen_llvm/mir/block.rs | 38 +- src/librustc_codegen_llvm/mir/constant.rs | 19 +- src/librustc_codegen_llvm/mir/mod.rs | 7 +- src/librustc_codegen_llvm/mir/operand.rs | 12 +- src/librustc_codegen_llvm/mir/place.rs | 58 +-- src/librustc_codegen_llvm/mir/rvalue.rs | 65 ++-- src/librustc_codegen_llvm/type_.rs | 2 +- 24 files changed, 483 insertions(+), 392 deletions(-) create mode 100644 src/librustc_codegen_llvm/interfaces/common.rs diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 6025b59482158..9b98888ddc37b 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -11,7 +11,7 @@ use llvm::{self, AttributePlace}; use base; use builder::{Builder, MemFlags}; -use common::{ty_fn_sig, C_usize}; +use common::ty_fn_sig; use context::CodegenCx; use mir::place::PlaceRef; use mir::operand::OperandValue; @@ -19,7 +19,7 @@ use type_::Type; use type_of::{LayoutLlvmExt, 
PointerKind}; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; @@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { base::call_memcpy(bx, bx.pointercast(dst.llval, Type::i8p(cx)), bx.pointercast(llscratch, Type::i8p(cx)), - C_usize(cx, self.layout.size.bytes()), + CodegenCx::c_usize(cx, self.layout.size.bytes()), self.layout.align.min(scratch_align), MemFlags::empty()); diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 8b7869f71556f..d9424293689cd 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -9,14 +9,14 @@ // except according to those terms. use llvm; -use common::*; +use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; use builder::Builder; use value::Value; use rustc::hir; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use mir::place::PlaceRef; use mir::operand::OperandValue; @@ -111,7 +111,7 @@ pub fn codegen_inline_asm( let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = CodegenCx::c_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 86c6a5e65b0e9..8993937ebb2b5 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -748,7 +748,7 @@ fn link_natively(sess: &Session, // with some thread pool working in the background. It seems that no one // currently knows a fix for this so in the meantime we're left with this... 
info!("{:?}", &cmd); - let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok(); + let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok(); let mut prog; let mut i = 0; loop { diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 81619c219757b..171c0de3f2724 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -45,8 +45,8 @@ use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; use type_::Type; -use context::{is_pie_binary, get_reloc_model}; -use common::{C_bytes_in_context, val_ty}; +use context::{is_pie_binary, get_reloc_model, CodegenCx}; +use interfaces::CommonMethods; use jobserver::{Client, Acquired}; use rustc_demangle; @@ -889,10 +889,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = CodegenCx::c_bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + CodegenCx::val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -909,10 +909,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = C_bytes_in_context(llcx, &[]); + let llconst = CodegenCx::c_bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + CodegenCx::val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index ae0b8e28c7f6e..f01aade991e60 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -53,10 +53,9 @@ use
mir::place::PlaceRef; use attributes; use builder::{Builder, MemFlags}; use callee; -use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, C_struct_in_context, C_array, val_ty, IntPredicate, RealPredicate}; +use common::{self, IntPredicate, RealPredicate}; use consts; use context::CodegenCx; use debuginfo; @@ -75,7 +74,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use std::any::Any; use std::ffi::CString; @@ -200,7 +199,7 @@ pub fn unsized_info( let (source, target) = cx.tcx.struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - C_usize(cx, len.unwrap_usize(cx.tcx)) + CodegenCx::c_usize(cx, len.unwrap_usize(cx.tcx)) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -352,8 +351,8 @@ fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, { // Shifts may have any size int on the rhs if op.is_shift() { - let mut rhs_llty = val_ty(rhs); - let mut lhs_llty = val_ty(lhs); + let mut rhs_llty = CodegenCx::val_ty(rhs); + let mut lhs_llty = CodegenCx::val_ty(lhs); if rhs_llty.kind() == TypeKind::Vector { rhs_llty = rhs_llty.element_type() } @@ -394,7 +393,7 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( bx: &Builder<'_ ,'ll, '_, &'ll Value>, val: &'ll Value ) -> &'ll Value { - if val_ty(val) == Type::i1(bx.cx()) { + if CodegenCx::val_ty(val) == Type::i1(bx.cx()) { bx.zext(val, Type::i8(bx.cx())) } else { val @@ -434,7 +433,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
let val = bx.load(src, align); - let ptr = bx.pointercast(dst, val_ty(val).ptr_to()); + let ptr = bx.pointercast(dst, CodegenCx::val_ty(val).ptr_to()); bx.store_with_flags(val, ptr, align, flags); return; } @@ -445,8 +444,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( let src_ptr = bx.pointercast(src, Type::i8p(cx)); let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); let size = bx.intcast(n_bytes, cx.isize_ty, false); - let align = C_i32(cx, align.abi() as i32); - let volatile = C_bool(cx, flags.contains(MemFlags::VOLATILE)); + let align = CodegenCx::c_i32(cx, align.abi() as i32); + let volatile = CodegenCx::c_bool(cx, flags.contains(MemFlags::VOLATILE)); bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -463,7 +462,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( return; } - call_memcpy(bx, dst, src, C_usize(bx.cx(), size), align, flags); + call_memcpy(bx, dst, src, CodegenCx::c_usize(bx.cx(), size), align, flags); } pub fn call_memset( @@ -477,7 +476,7 @@ pub fn call_memset( let ptr_width = &bx.cx.sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(bx.cx, volatile); + let volatile = CodegenCx::c_bool(bx.cx, volatile); bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } @@ -653,12 +652,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = C_bytes_in_context(metadata_llcx, &compressed); - let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = CodegenCx::c_bytes_in_context(metadata_llcx, &compressed); + let llconst = CodegenCx::c_struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, 
val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, CodegenCx::val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); @@ -1237,7 +1236,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Run replace-all-uses-with for statics that need it for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g)); + let bitcast = llvm::LLVMConstPointerCast(new_g, CodegenCx::val_ty(old_g)); llvm::LLVMReplaceAllUsesWith(old_g, bitcast); llvm::LLVMDeleteGlobal(old_g); } @@ -1248,11 +1247,11 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if !cx.used_statics.borrow().is_empty() { let name = const_cstr!("llvm.used"); let section = const_cstr!("llvm.metadata"); - let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); + let array = CodegenCx::c_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, - val_ty(array), + CodegenCx::val_ty(array), name.as_ptr()); llvm::LLVMSetInitializer(g, array); llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 81ff6f77a458f..d85e8077aaa2e 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -11,6 +11,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; use common::{self, *}; +use context::CodegenCx; use type_; use value::Value; use libc::{c_uint, c_char}; @@ -18,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, Backend}; +use interfaces::{BuilderMethods, Backend, CommonMethods}; use syntax; use std::borrow::Cow; @@ -59,6 +60,7 @@ impl Backend for 
Builder<'a, 'll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll type_::Type; + type Context = &'ll llvm::Context; } impl BuilderMethods<'a, 'll, 'tcx> @@ -545,10 +547,10 @@ impl BuilderMethods<'a, 'll, 'tcx> } unsafe { - let llty = val_ty(load); + let llty = CodegenCx::val_ty(load); let v = [ - C_uint_big(llty, range.start), - C_uint_big(llty, range.end) + CodegenCx::c_uint_big(llty, range.start), + CodegenCx::c_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -613,7 +615,7 @@ impl BuilderMethods<'a, 'll, 'tcx> // *always* point to a metadata value of the integer 1. // // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = C_i32(self.cx, 1); + let one = CodegenCx::c_i32(self.cx, 1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -779,7 +781,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let argtys = inputs.iter().map(|v| { debug!("Asm Input Type: {:?}", *v); - val_ty(*v) + CodegenCx::val_ty(*v) }).collect::>(); debug!("Asm Output Type: {:?}", output); @@ -860,11 +862,11 @@ impl BuilderMethods<'a, 'll, 'tcx> fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { - let elt_ty = val_ty(elt); + let elt_ty = CodegenCx::val_ty(elt); let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); + let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0)); let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) + self.shuffle_vector(vec, undef, CodegenCx::c_null(vec_i32_ty)) } } @@ -1171,8 +1173,8 @@ impl BuilderMethods<'a, 'll, 'tcx> fn check_store<'b>(&self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = val_ty(ptr); - let stored_ty = val_ty(val); + let dest_ptr_ty = 
CodegenCx::val_ty(ptr); + let stored_ty = CodegenCx::val_ty(val); let stored_ptr_ty = stored_ty.ptr_to(); assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); @@ -1192,7 +1194,7 @@ impl BuilderMethods<'a, 'll, 'tcx> typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = val_ty(llfn); + let mut fn_ty = CodegenCx::val_ty(llfn); // Strip off pointers while fn_ty.kind() == llvm::TypeKind::Pointer { fn_ty = fn_ty.element_type(); @@ -1204,7 +1206,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let param_tys = fn_ty.func_params(); let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| val_ty(v))) + .zip(args.iter().map(|&v| CodegenCx::val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1215,7 +1217,7 @@ impl BuilderMethods<'a, 'll, 'tcx> .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = val_ty(actual_val); + let actual_ty = CodegenCx::val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. 
\ Expected {:?} for param {}, got {:?}; injecting bitcast", @@ -1259,7 +1261,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx)); - self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); + self.call(lifetime_intrinsic, &[CodegenCx::c_u64(self.cx, size), ptr], None); } fn call(&self, llfn: &'ll Value, args: &[&'ll Value], diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 4d81c2894345b..d5e9ad9d9ea4f 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -22,6 +22,7 @@ use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; use value::Value; +use interfaces::CommonMethods; use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; @@ -83,7 +84,7 @@ pub fn get_fn( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - if common::val_ty(llfn) != llptrty { + if CodegenCx::val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); consts::ptrcast(llfn, llptrty) } else { @@ -92,7 +93,7 @@ pub fn get_fn( } } else { let llfn = declare::declare_fn(cx, &sym, fn_ty); - assert_eq!(common::val_ty(llfn), llptrty); + assert_eq!(CodegenCx::val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); if instance.def.is_inline(tcx) { diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 2bd758e0d9623..74a005985cd6f 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -13,7 +13,7 @@ //! Code that is useful in various codegen modules. 
use llvm::{self, TypeKind}; -use llvm::{True, False, Bool}; +use llvm::{True, False, Bool, BasicBlock}; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; use abi; @@ -24,6 +24,7 @@ use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; +use interfaces::{Backend, CommonMethods}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -194,233 +195,241 @@ impl Funclet<'ll> { } } -pub fn val_ty(v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } +impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; + type Type = &'ll Type; + type Context = &'ll llvm::Context; } -// LLVM constant constructors. -pub fn C_null(t: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstNull(t) +impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { + fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } } -} -pub fn C_undef(t: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMGetUndef(t) + // LLVM constant constructors. 
+ fn c_null(t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstNull(t) + } } -} -pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i as u64, True) + fn c_undef(t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMGetUndef(t) + } } -} -pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i, False) + fn c_int(t: &'ll Type, i: i64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i as u64, True) + } } -} -pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { - unsafe { - let words = [u as u64, (u >> 64) as u64]; - llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + fn c_uint(t: &'ll Type, i: u64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i, False) + } } -} - -pub fn C_bool(cx: &CodegenCx<'ll, '_, &'ll Value>, val: bool) -> &'ll Value { - C_uint(Type::i1(cx), val as u64) -} - -pub fn C_i32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: i32) -> &'ll Value { - C_int(Type::i32(cx), i as i64) -} -pub fn C_u32(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u32) -> &'ll Value { - C_uint(Type::i32(cx), i as u64) -} - -pub fn C_u64(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { - C_uint(Type::i64(cx), i) -} + fn c_uint_big(t: &'ll Type, u: u128) -> &'ll Value { + unsafe { + let words = [u as u64, (u >> 64) as u64]; + llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + } + } -pub fn C_usize(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u64) -> &'ll Value { - let bit_size = cx.data_layout().pointer_size.bits(); - if bit_size < 64 { - // make sure it doesn't overflow - assert!(i < (1< &'ll Value { + Self::c_uint(Type::i1(&self), val as u64) } - C_uint(cx.isize_ty, i) -} + fn c_i32(&self, i: i32) -> &'ll Value { + Self::c_int(Type::i32(&self), i as i64) + } -pub fn C_u8(cx: &CodegenCx<'ll, '_, &'ll Value>, i: u8) -> &'ll Value { - C_uint(Type::i8(cx), i as u64) -} + fn c_u32(&self, i: u32) -> &'ll Value { + Self::c_uint(Type::i32(&self), i as u64) + } + fn c_u64(&self, i: 
u64) -> &'ll Value { + Self::c_uint(Type::i64(&self), i) + } -// This is a 'c-like' raw string, which differs from -// our boxed-and-length-annotated strings. -pub fn C_cstr( - cx: &CodegenCx<'ll, '_, &'ll Value>, - s: LocalInternedString, - null_terminated: bool, -) -> &'ll Value { - unsafe { - if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) { - return llval; + fn c_usize(&self, i: u64) -> &'ll Value { + let bit_size = self.data_layout().pointer_size.bits(); + if bit_size < 64 { + // make sure it doesn't overflow + assert!(i < (1<, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), - cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); - C_fat_ptr(cx, cs, C_usize(cx, len as u64)) -} + fn c_u8(&self, i: u8) -> &'ll Value { + Self::c_uint(Type::i8(&self), i as u64) + } -pub fn C_fat_ptr( - cx: &CodegenCx<'ll, '_, &'ll Value>, - ptr: &'ll Value, - meta: &'ll Value -) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ptr, meta], false) -} -pub fn C_struct( - cx: &CodegenCx<'ll, '_, &'ll Value>, - elts: &[&'ll Value], - packed: bool -) -> &'ll Value { - C_struct_in_context(cx.llcx, elts, packed) -} + // This is a 'c-like' raw string, which differs from + // our boxed-and-length-annotated strings. 
+ fn c_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = &self.const_cstr_cache.borrow().get(&s) { + return llval; + } -pub fn C_struct_in_context( - llcx: &'ll llvm::Context, - elts: &[&'ll Value], - packed: bool, -) -> &'ll Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) + let sc = llvm::LLVMConstStringInContext(&self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = &self.generate_local_symbol_name("str"); + let g = declare::define_global(&self, &sym[..], Self::val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + &self.const_cstr_cache.borrow_mut().insert(s, g); + g + } } -} -pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + // NB: Do not use `do_spill_noroot` to make this into a constant string, or + // you will be kicked off fast isel. See issue #4352 for an example of this. 
+ fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(&self.c_cstr(s, false), + &self.layout_of(&self.tcx.mk_str()).llvm_type(&self).ptr_to()); + &self.c_fat_ptr(cs, &self.c_usize(len as u64)) } -} -pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + fn c_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + &self.c_struct(&[ptr, meta], false) } -} -pub fn C_bytes(cx: &CodegenCx<'ll, '_, &'ll Value>, bytes: &[u8]) -> &'ll Value { - C_bytes_in_context(cx.llcx, bytes) -} + fn c_struct( + &self, + elts: &[&'ll Value], + packed: bool + ) -> &'ll Value { + Self::c_struct_in_context(&self.llcx, elts, packed) + } -pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + fn c_struct_in_context( + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, + ) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) + } } -} -pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + fn c_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } + } - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); + fn c_vector(elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } + } - r + fn c_bytes(&self, bytes: &[u8]) -> &'ll Value { + Self::c_bytes_in_context(&self.llcx, bytes) } -} -pub fn 
const_get_real(v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None + fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); } } -} -pub fn const_to_uint(v: &'ll Value) -> u64 { - unsafe { - llvm::LLVMConstIntGetZExtValue(v) + fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } } -} -pub fn is_const_integral(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantInt(v).is_some() + fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if Self::is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } } -} -pub fn is_const_real(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() + fn const_to_uint(v: &'ll Value) -> u64 { + unsafe { + llvm::LLVMConstIntGetZExtValue(v) + } } -} + fn is_const_integral(v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantInt(v).is_some() + } + } -#[inline] -fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { - ((hi as u128) << 64) | (lo as u128) -} + fn is_const_real(v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } + } -pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option { - unsafe { - if is_const_integral(v) { - let (mut 
lo, mut hi) = (0u64, 0u64); - let success = llvm::LLVMRustConstInt128Get(v, sign_ext, - &mut hi, &mut lo); - if success { - Some(hi_lo_to_u128(lo, hi)) + fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option { + unsafe { + if Self::is_const_integral(v) { + let (mut lo, mut hi) = (0u64, 0u64); + let success = llvm::LLVMRustConstInt128Get(v, sign_ext, + &mut hi, &mut lo); + if success { + Some(hi_lo_to_u128(lo, hi)) + } else { + None + } } else { None } - } else { - None } } } +#[inline] +fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { + ((hi as u128) << 64) | (lo as u128) +} + pub fn langcall(tcx: TyCtxt, span: Option, msg: &str, @@ -466,7 +475,7 @@ pub fn build_unchecked_rshift( } fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, rhs: &'ll Value) -> &'ll Value { - let rhs_llty = val_ty(rhs); + let rhs_llty = CodegenCx::val_ty(rhs); bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) } @@ -482,9 +491,9 @@ pub fn shift_mask_val( // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. 
let val = llty.int_width() - 1; if invert { - C_int(mask_llty, !val as i64) + CodegenCx::c_int(mask_llty, !val as i64) } else { - C_uint(mask_llty, val) + CodegenCx::c_uint(mask_llty, val) } }, TypeKind::Vector => { diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index f41808f675af7..450c8145d37e9 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -15,7 +15,7 @@ use rustc::hir::Node; use debuginfo; use base; use monomorphize::MonoItem; -use common::{CodegenCx, val_ty}; +use common::CodegenCx; use declare; use monomorphize::Instance; use syntax_pos::Span; @@ -24,6 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; +use interfaces::CommonMethods; use rustc::ty::layout::{Align, LayoutOf}; @@ -72,13 +73,14 @@ pub fn addr_of_mut( let gv = match kind { Some(kind) if !cx.tcx.sess.fewer_names() => { let name = cx.generate_local_symbol_name(kind); - let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", name); + let gv = declare::define_global(cx, &name[..], + CodegenCx::val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); }); llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); gv }, - _ => declare::define_private_global(cx, val_ty(cv)), + _ => declare::define_private_global(cx, CodegenCx::val_ty(cv)), }; llvm::LLVMSetInitializer(gv, cv); set_global_alignment(cx, gv, align); @@ -310,7 +312,7 @@ pub fn codegen_static<'a, 'tcx>( // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = val_ty(v); + let mut val_llty = CodegenCx::val_ty(v); let v = if val_llty == Type::i1(cx) { val_llty = Type::i8(cx); llvm::LLVMConstZExt(v, val_llty) diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 
047d8bbdd3245..d5b6e5e4af05c 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -12,13 +12,13 @@ use llvm; -use common::{C_bytes, CodegenCx, C_i32}; +use common::CodegenCx; use builder::Builder; use declare; use rustc::session::config::DebugInfo; use type_::Type; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use syntax::attr; @@ -30,7 +30,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)]; + let indices = [CodegenCx::c_i32(bx.cx, 0), CodegenCx::c_i32(bx.cx, 0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -64,7 +64,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents)); + llvm::LLVMSetInitializer(section_var, CodegenCx::c_bytes(cx, section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 7c225c5a1c7ee..0efaafef0b89b 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -16,11 +16,12 @@ use std; use builder::Builder; use common::*; +use context::CodegenCx; use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -use interfaces::BuilderMethods; +use 
interfaces::{BuilderMethods, CommonMethods}; pub fn size_and_align_of_dst( bx: &Builder<'_, 'll, 'tcx, &'ll Value>, @@ -33,8 +34,8 @@ pub fn size_and_align_of_dst( let (size, align) = bx.cx.size_and_align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = C_usize(bx.cx, size.bytes()); - let align = C_usize(bx.cx, align.abi()); + let size = CodegenCx::c_usize(bx.cx, size.bytes()); + let align = CodegenCx::c_usize(bx.cx, align.abi()); return (size, align); } match t.sty { @@ -48,8 +49,8 @@ pub fn size_and_align_of_dst( // The info in this case is the length of the str, so the size is that // times the unit size. let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())), - C_usize(bx.cx, align.abi())) + (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx, size.bytes())), + CodegenCx::c_usize(bx.cx, align.abi())) } _ => { let cx = bx.cx; @@ -65,8 +66,8 @@ pub fn size_and_align_of_dst( let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = C_usize(cx, sized_size); - let sized_align = C_usize(cx, sized_align); + let sized_size = CodegenCx::c_usize(cx, sized_size); + let sized_align = CodegenCx::c_usize(cx, sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -92,12 +93,12 @@ pub fn size_and_align_of_dst( // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = match (const_to_opt_u128(sized_align, false), - const_to_opt_u128(unsized_align, false)) { + let align = match (CodegenCx::const_to_opt_u128(sized_align, false), + CodegenCx::const_to_opt_u128(unsized_align, false)) { (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. 
- C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) + CodegenCx::c_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) } _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align), sized_align, @@ -115,7 +116,7 @@ pub fn size_and_align_of_dst( // // `(size + (align-1)) & -align` - let addend = bx.sub(align, C_usize(bx.cx, 1)); + let addend = bx.sub(align, CodegenCx::c_usize(bx.cx, 1)); let size = bx.and(bx.add(size, addend), bx.neg(align)); (size, align) diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index b2a6bf2dd8c0a..648ae15eb3fa0 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -12,4 +12,5 @@ pub trait Backend { type Value; type BasicBlock; type Type; + type Context; } diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs new file mode 100644 index 0000000000000..c43e3b7504a12 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -0,0 +1,61 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::Backend; +use syntax::symbol::LocalInternedString; + +pub trait CommonMethods : Backend { + fn val_ty(v: Self::Value) -> Self::Type; + + // Constant constructors + fn c_null(t: Self::Type) -> Self::Value; + fn c_undef(t: Self::Type) -> Self::Value; + fn c_int(t: Self::Type, i: i64) -> Self::Value; + fn c_uint(t: Self::Type, i: u64) -> Self::Value; + fn c_uint_big(t: Self::Type, u: u128) -> Self::Value; + fn c_bool(&self, val: bool) -> Self::Value; + fn c_i32(&self, i: i32) -> Self::Value; + fn c_u32(&self, i: u32) -> Self::Value; + fn c_u64(&self, i: u64) -> Self::Value; + fn c_usize(&self, i: u64) -> Self::Value; + fn c_u8(&self, i: u8) -> Self::Value; + fn c_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> Self::Value; + fn c_str_slice(&self, s: LocalInternedString) -> Self::Value; + fn c_fat_ptr( + &self, + ptr: Self::Value, + meta: Self::Value + ) -> Self::Value; + fn c_struct( + &self, + elts: &[Self::Value], + packed: bool + ) -> Self::Value; + fn c_struct_in_context( + llcx: Self::Context, + elts: &[Self::Value], + packed: bool, + ) -> Self::Value; + fn c_array(ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn c_vector(elts: &[Self::Value]) -> Self::Value; + fn c_bytes(&self, bytes: &[u8]) -> Self::Value; + fn c_bytes_in_context(llcx: Self::Context, bytes: &[u8]) -> Self::Value; + + fn const_get_elt(v: Self::Value, idx: u64) -> Self::Value; + fn const_get_real(v: Self::Value) -> Option<(f64, bool)>; + fn const_to_uint(v: Self::Value) -> u64; + fn is_const_integral(v: Self::Value) -> bool; + fn is_const_real(v: Self::Value) -> bool; + fn const_to_opt_u128(v: Self::Value, sign_ext: bool) -> Option; +} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index b9a356874ba96..77db6393f6c1e 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -10,6 +10,8 @@ mod builder; mod backend; +mod common; pub use 
self::builder::BuilderMethods; pub use self::backend::Backend; +pub use self::common::CommonMethods; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index c8ec4fb2144fb..3a55f7b2045db 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -18,6 +18,7 @@ use mir::place::PlaceRef; use mir::operand::{OperandRef, OperandValue}; use base::*; use common::*; +use context::CodegenCx; use declare; use glue; use type_::Type; @@ -30,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use rustc::session::Session; use syntax_pos::Span; @@ -125,11 +126,11 @@ pub fn codegen_intrinsic_call( }, "likely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None) + bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, true)], None) } "unlikely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None) + bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, false)], None) } "try" => { try_intrinsic(bx, cx, @@ -145,7 +146,7 @@ pub fn codegen_intrinsic_call( } "size_of" => { let tp_ty = substs.type_at(0); - C_usize(cx, cx.size_of(tp_ty).bytes()) + CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); @@ -154,12 +155,12 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); llsize } else { - C_usize(cx, cx.size_of(tp_ty).bytes()) + CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes()) } } "min_align_of" => { let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).abi()) + CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -168,20 +169,20 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, 
Some(meta)); llalign } else { - C_usize(cx, cx.align_of(tp_ty).abi()) + CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).pref()) + CodegenCx::c_usize(cx, cx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - C_str_slice(cx, ty_name) + CodegenCx::c_str_slice(cx, ty_name) } "type_id" => { - C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) + CodegenCx::c_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) } "init" => { let ty = substs.type_at(0); @@ -190,7 +191,14 @@ pub fn codegen_intrinsic_call( // If we store a zero constant, LLVM will drown in vreg allocation for large data // structures, and the generated code will be awful. (A telltale sign of this is // large quantities of `mov [byte ptr foo],0` in the generated code.) - memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1)); + memset_intrinsic( + bx, + false, + ty, + llresult, + CodegenCx::c_u8(cx, 0), + CodegenCx::c_usize(cx, 1) + ); } return; } @@ -201,7 +209,7 @@ pub fn codegen_intrinsic_call( "needs_drop" => { let tp_ty = substs.type_at(0); - C_bool(cx, bx.cx.type_needs_drop(tp_ty)) + CodegenCx::c_bool(cx, bx.cx.type_needs_drop(tp_ty)) } "offset" => { let ptr = args[0].immediate(); @@ -278,9 +286,9 @@ pub fn codegen_intrinsic_call( }; bx.call(expect, &[ args[0].immediate(), - C_i32(cx, rw), + CodegenCx::c_i32(cx, rw), args[1].immediate(), - C_i32(cx, cache_type) + CodegenCx::c_i32(cx, cache_type) ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | @@ -292,12 +300,12 @@ pub fn codegen_intrinsic_call( Some((width, signed)) => match name { "ctlz" | "cttz" => { - let y = C_bool(bx.cx, false); + let y = CodegenCx::c_bool(bx.cx, false); let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); bx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { - let y = 
C_bool(bx.cx, true); + let y = CodegenCx::c_bool(bx.cx, true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = cx.get_intrinsic(llvm_name); bx.call(llfn, &[args[0].immediate(), y], None) @@ -697,8 +705,8 @@ fn copy_intrinsic( ) -> &'ll Value { let cx = bx.cx; let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); + let size = CodegenCx::c_usize(cx, size.bytes()); + let align = CodegenCx::c_i32(cx, align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -718,7 +726,7 @@ fn copy_intrinsic( src_ptr, bx.mul(size, count), align, - C_bool(cx, volatile)], + CodegenCx::c_bool(cx, volatile)], None) } @@ -732,8 +740,8 @@ fn memset_intrinsic( ) -> &'ll Value { let cx = bx.cx; let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); + let size = CodegenCx::c_usize(cx, size.bytes()); + let align = CodegenCx::c_i32(cx, align.abi() as i32); let dst = bx.pointercast(dst, Type::i8p(cx)); call_memset(bx, dst, val, bx.mul(size, count), align, volatile) } @@ -749,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align); + bx.store(CodegenCx::c_null(Type::i8p(&bx.cx)), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { @@ -830,7 +838,7 @@ fn codegen_msvc_try( let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(C_i32(cx, 0)); + normal.ret(CodegenCx::c_i32(cx, 0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); @@ -840,19 +848,19 @@ fn codegen_msvc_try( Some(did) => ::consts::get_static(cx, did), None => bug!("msvc_try_filter not defined"), }; - let tok = 
catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]); + let tok = catchpad.catch_pad(cs, &[tydesc, CodegenCx::c_i32(cx, 0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); - let val1 = C_i32(cx, 1); + let val1 = CodegenCx::c_i32(cx, 1); let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); catchpad.store(arg1, local_ptr, i64_align); catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); catchpad.catch_ret(tok, caught.llbb()); - caught.ret(C_i32(cx, 1)); + caught.ret(CodegenCx::c_i32(cx, 1)); }); // Note that no invoke is used here because by definition this function @@ -908,7 +916,7 @@ fn codegen_gnu_try( let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(C_i32(cx, 0)); + then.ret(CodegenCx::c_i32(cx, 0)); // Type indicator for the exception being thrown. // @@ -918,11 +926,11 @@ fn codegen_gnu_try( // rust_try ignores the selector. 
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1); - catch.add_clause(vals, C_null(Type::i8p(cx))); + catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx))); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); - catch.ret(C_i32(cx, 1)); + catch.ret(CodegenCx::c_i32(cx, 1)); }); // Note that no invoke is used here because by definition this function @@ -1100,8 +1108,8 @@ fn generic_simd_intrinsic( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, i as u64); - match const_to_opt_u128(val, true) { + let val = CodegenCx::const_get_elt(vector, i as u64); + match CodegenCx::const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); None @@ -1111,18 +1119,18 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(C_i32(bx.cx, idx as i32)), + Some(idx) => Some(CodegenCx::c_i32(bx.cx, idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(C_null(llret_ty)) + None => return Ok(CodegenCx::c_null(llret_ty)) }; return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - C_vector(&indices))) + CodegenCx::c_vector(&indices))) } if name == "simd_insert" { @@ -1373,7 +1381,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1473,7 +1481,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + 
let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1535,7 +1543,7 @@ fn generic_simd_intrinsic( // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match const_get_real(acc) { + match CodegenCx::const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { @@ -1551,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => C_undef(Type::f32(bx.cx)), - 64 => C_undef(Type::f64(bx.cx)), + 32 => CodegenCx::c_undef(Type::f32(bx.cx)), + 64 => CodegenCx::c_undef(Type::f64(bx.cx)), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 4c04f7c06090a..fbf0f7473dd4b 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -215,7 +215,7 @@ pub enum TypeKind { Double = 3, X86_FP80 = 4, FP128 = 5, - PPC_FP128 = 6, + PPc_FP128 = 6, Label = 7, Integer = 8, Function = 9, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 8db57797eb682..318e05f434ae7 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -10,14 +10,14 @@ use abi::{FnType, FnTypeExt}; use callee; -use common::*; +use context::CodegenCx; use builder::Builder; use consts; use monomorphize; use type_::Type; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; @@ -43,7 +43,10 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to()); let ptr_align = bx.tcx().data_layout.pointer_align; - let ptr = 
bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align); + let ptr = bx.load( + bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]), + ptr_align + ); bx.nonnull_metadata(ptr); // Vtable loads are invariant bx.set_invariant_load(ptr); @@ -60,7 +63,10 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); let usize_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align); + let ptr = bx.load( + bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]), + usize_align + ); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr @@ -90,7 +96,7 @@ pub fn get_vtable( } // Not in the cache. Build it. - let nullptr = C_null(Type::i8p(cx)); + let nullptr = CodegenCx::c_null(Type::i8p(cx)); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { @@ -106,11 +112,11 @@ pub fn get_vtable( // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - C_usize(cx, size.bytes()), - C_usize(cx, align.abi()) + CodegenCx::c_usize(cx, size.bytes()), + CodegenCx::c_usize(cx, align.abi()) ].iter().cloned().chain(methods).collect(); - let vtable_const = C_struct(cx, &components, false); + let vtable_const = CodegenCx::c_struct(cx, &components, false); let align = cx.data_layout().pointer_align; let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 3f78d45c0a318..36851523cd28b 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -18,7 +18,8 @@ use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; use base; use callee; use builder::{Builder, MemFlags}; 
-use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef, IntPredicate}; +use common::{self, IntPredicate}; +use context::CodegenCx; use consts; use meth; use monomorphize; @@ -26,7 +27,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -171,7 +172,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { slot.storage_dead(&bx); if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = C_undef(self.landing_pad_type()); + let mut lp = CodegenCx::c_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); @@ -209,7 +210,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } else { let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); - let llval = C_uint_big(switch_llty, values[0]); + let llval = CodegenCx::c_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } @@ -220,7 +221,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { values.len()); let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); for (&value, target) in values.iter().zip(targets) { - let llval = C_uint_big(switch_llty, value); + let llval = CodegenCx::c_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -324,7 +325,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { let cond = self.codegen_operand(&bx, cond).immediate(); - let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + let mut const_cond = CodegenCx::const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from @@ -347,7 +348,7 @@ impl 
FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Pass the condition through llvm.expect for branch hinting. let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None); + let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx, expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -365,9 +366,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Get the location information. let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = CodegenCx::c_str_slice(bx.cx, filename); + let line = CodegenCx::c_u32(bx.cx, loc.line as u32); + let col = CodegenCx::c_u32(bx.cx, loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -378,7 +379,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = C_struct(bx.cx, &[filename, line, col], false); + let file_line_col = CodegenCx::c_struct(bx.cx, + &[filename, line, col], false); let file_line_col = consts::addr_of(bx.cx, file_line_col, align, @@ -389,10 +391,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); + let msg_str = CodegenCx::c_str_slice(bx.cx, msg_str); + let msg_file_line_col = CodegenCx::c_struct( + bx.cx, + &[msg_str, filename, line, col], + false + ); let msg_file_line_col = consts::addr_of(bx.cx, msg_file_line_col, align, @@ -559,7 
+563,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -700,7 +704,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { arg: &ArgType<'tcx, Ty<'tcx>>) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty.llvm_type(bx.cx))); + llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx))); } if arg.is_ignore() { diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 992bcfa86b067..2797f78dd2922 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -20,14 +20,13 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; use builder::Builder; use common::{CodegenCx}; -use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize}; use consts; use type_of::LayoutLlvmExt; use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use super::super::callee; use super::FunctionCx; @@ -42,11 +41,11 @@ pub fn scalar_to_llvm( match cv { Scalar::Bits { size: 0, .. 
} => { assert_eq!(0, layout.value.size(cx).bytes()); - C_undef(Type::ix(cx, 0)) + CodegenCx::c_undef(Type::ix(cx, 0)) }, Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = C_uint_big(Type::ix(cx, bitsize), bits); + let llval = CodegenCx::c_uint_big(Type::ix(cx, bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { @@ -75,7 +74,7 @@ pub fn scalar_to_llvm( }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( consts::bitcast(base_addr, Type::i8p(cx)), - &C_usize(cx, ptr.offset.bytes()), + &CodegenCx::c_usize(cx, ptr.offset.bytes()), 1, ) }; if layout.value != layout::Pointer { @@ -98,7 +97,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati assert_eq!(offset as usize as u64, offset); let offset = offset as usize; if offset > next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset])); + llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset..offset])); } let ptr_offset = read_target_uint( layout.endian, @@ -116,10 +115,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati next_offset = offset + pointer_size; } if alloc.bytes.len() >= next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..])); + llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset ..])); } - C_struct(cx, &llvals, true) + CodegenCx::c_struct(cx, &llvals, true) } pub fn codegen_static_initializer( @@ -209,7 +208,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bug!("simd shuffle field {:?}", field) } }).collect(); - let llval = C_struct(bx.cx, &values?, false); + let llval = CodegenCx::c_struct(bx.cx, &values?, false); Ok((llval, c.ty)) }) .unwrap_or_else(|e| { @@ -220,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. 
let ty = self.monomorphize(&ty); let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); - (C_undef(llty), ty) + (CodegenCx::c_undef(llty), ty) }) } } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 3490361f4397d..87e961ec29f1a 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use common::{C_i32, C_null}; use libc::c_uint; use llvm::{self, BasicBlock}; use llvm::debuginfo::DIScope; @@ -25,7 +24,7 @@ use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; use type_::Type; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -420,8 +419,8 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. 
- let null = C_null(Type::i8p(bx.cx)); - let sixty_four = C_i32(bx.cx, 64); + let null = CodegenCx::c_null(Type::i8p(bx.cx)); + let sixty_four = CodegenCx::c_i32(bx.cx, 64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 13eddae6c3ac5..4397fb4447940 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -15,14 +15,14 @@ use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; use rustc_data_structures::sync::Lrc; use base; -use common::{CodegenCx, C_undef, C_usize}; +use common::CodegenCx; use builder::{Builder, MemFlags}; use value::Value; use type_of::LayoutLlvmExt; use type_::Type; use glue; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use std::fmt; @@ -74,7 +74,7 @@ impl OperandRef<'tcx, &'ll Value> { layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(CodegenCx::c_undef(layout.immediate_llvm_type(cx))), layout } } @@ -168,7 +168,7 @@ impl OperandRef<'tcx, &'ll Value> { debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = C_undef(llty); + let mut llpair = CodegenCx::c_undef(llty); llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); llpair @@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. 
}) => { OperandValue::Immediate( - bx.extract_element(llval, C_usize(bx.cx, i as u64))) + bx.extract_element(llval, CodegenCx::c_usize(bx.cx, i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -460,7 +460,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. let layout = bx.cx.layout_of(ty); PlaceRef::new_sized( - C_undef(layout.llvm_type(bx.cx).ptr_to()), + CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 9f1da16976797..624aa4e85caa7 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -15,7 +15,7 @@ use rustc::mir; use rustc::mir::tcx::PlaceTy; use base; use builder::Builder; -use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big, IntPredicate}; +use common::{CodegenCx, IntPredicate}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -23,7 +23,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -69,7 +69,7 @@ impl PlaceRef<'tcx, &'ll Value> { let llval = unsafe { LLVMConstInBoundsGEP( consts::bitcast(base_addr, Type::i8p(bx.cx)), - &C_usize(bx.cx, offset.bytes()), + &CodegenCx::c_usize(bx.cx, offset.bytes()), 1, )}; let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); @@ -103,7 +103,7 @@ impl PlaceRef<'tcx, &'ll Value> { assert_eq!(count, 0); self.llextra.unwrap() } else { - C_usize(cx, count) + CodegenCx::c_usize(cx, count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) @@ -247,7 +247,7 @@ impl PlaceRef<'tcx, &'ll Value> { let meta = self.llextra; - let unaligned_offset = C_usize(cx, offset.bytes()); + let unaligned_offset = 
CodegenCx::c_usize(cx, offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -258,7 +258,7 @@ impl PlaceRef<'tcx, &'ll Value> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64)); + let align_sub_1 = bx.sub(unsized_align, CodegenCx::c_usize(cx, 1u64)); let offset = bx.and(bx.add(unaligned_offset, align_sub_1), bx.neg(unsized_align)); @@ -288,14 +288,14 @@ impl PlaceRef<'tcx, &'ll Value> { ) -> &'ll Value { let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); if self.layout.abi.is_uninhabited() { - return C_undef(cast_to); + return CodegenCx::c_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, |def| def.discriminant_for_variant(bx.cx.tcx, index).val); - return C_uint_big(cast_to, discr_val); + return CodegenCx::c_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, @@ -326,22 +326,22 @@ impl PlaceRef<'tcx, &'ll Value> { if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + CodegenCx::c_null(niche_llty) } else { - C_uint_big(niche_llty, niche_start) + CodegenCx::c_uint_big(niche_llty, niche_start) }; bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), - C_uint(cast_to, *niche_variants.start() as u64), - C_uint(cast_to, dataful_variant as u64)) + CodegenCx::c_uint(cast_to, *niche_variants.start() as u64), + CodegenCx::c_uint(cast_to, dataful_variant as u64)) } else { // Rebase from niche values to discriminant values. 
let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); - let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); - let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64); + let lldiscr = bx.sub(lldiscr, CodegenCx::c_uint_big(niche_llty, delta)); + let lldiscr_max = CodegenCx::c_uint(niche_llty, *niche_variants.end() as u64); bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), bx.intcast(lldiscr, cast_to, false), - C_uint(cast_to, dataful_variant as u64)) + CodegenCx::c_uint(cast_to, dataful_variant as u64)) } } } @@ -363,7 +363,7 @@ impl PlaceRef<'tcx, &'ll Value> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - C_uint_big(ptr.layout.llvm_type(bx.cx), to), + CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx), to), ptr.llval, ptr.align); } @@ -379,10 +379,10 @@ impl PlaceRef<'tcx, &'ll Value> { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); - let fill_byte = C_u8(bx.cx, 0); + let fill_byte = CodegenCx::c_u8(bx.cx, 0); let (size, align) = self.layout.size_and_align(); - let size = C_usize(bx.cx, size.bytes()); - let align = C_u32(bx.cx, align.abi() as u32); + let size = CodegenCx::c_usize(bx.cx, size.bytes()); + let align = CodegenCx::c_u32(bx.cx, align.abi() as u32); base::call_memset(bx, llptr, fill_byte, size, align, false); } @@ -392,10 +392,10 @@ impl PlaceRef<'tcx, &'ll Value> { .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. 
+ CodegenCx::c_null(niche_llty) } else { - C_uint_big(niche_llty, niche_value) + CodegenCx::c_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -406,7 +406,7 @@ impl PlaceRef<'tcx, &'ll Value> { pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) -> PlaceRef<'tcx, &'ll Value> { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx, 0), llindex]), llextra: None, layout: self.layout.field(bx.cx, 0), align: self.align @@ -481,7 +481,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // so we generate an abort let fnname = bx.cx.get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = C_undef(layout.llvm_type(bx.cx).ptr_to()); + let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -514,27 +514,27 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); + let lloffset = CodegenCx::c_usize(bx.cx, offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); + let lloffset = CodegenCx::c_usize(bx.cx, offset as u64); let lllen = cg_base.len(bx.cx); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - C_usize(bx.cx, from as u64)); + CodegenCx::c_usize(bx.cx, from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - C_usize(bx.cx, (from as 
u64) + (to as u64)))); + CodegenCx::c_usize(bx.cx, (from as u64) + (to as u64)))); } // Cast the place pointer type to the new diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index dd8b92afa7259..12d69adbfd772 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -19,18 +19,15 @@ use std::{u128, i128}; use base; use builder::Builder; use callee; -use common::{self, val_ty}; -use common::{ - C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, - C_uint, C_uint_big, IntPredicate, RealPredicate -}; +use common::{self, IntPredicate, RealPredicate}; +use context::CodegenCx; use consts; use monomorphize; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::BuilderMethods; +use interfaces::{BuilderMethods, CommonMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -106,28 +103,28 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return bx; } - let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval; + let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx, 0)).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = C_i32(bx.cx, dest.align.abi() as i32); - let size = C_usize(bx.cx, dest.layout.size.bytes()); + let align = CodegenCx::c_i32(bx.cx, dest.align.abi() as i32); + let size = CodegenCx::c_usize(bx.cx, dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let fill = C_u8(bx.cx, 0); + if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 { + let fill = CodegenCx::c_u8(bx.cx, 0); base::call_memset(&bx, start, fill, size, align, false); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); - if common::val_ty(v) == Type::i8(bx.cx) { + if CodegenCx::val_ty(v) == Type::i8(bx.cx) { base::call_memset(&bx, start, v, 
size, align, false); return bx; } } - let count = C_usize(bx.cx, count); + let count = CodegenCx::c_usize(bx.cx, count); let end = dest.project_index(&bx, count).llval; let header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -135,7 +132,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(CodegenCx::val_ty(start), &[start], &[bx.llbb()]); let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); @@ -143,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { cg_elem.val.store(&body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]); + let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx, 1)]); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -296,7 +293,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let ll_t_out = cast.immediate_llvm_type(bx.cx); if operand.layout.abi.is_uninhabited() { return (bx, OperandRef { - val: OperandValue::Immediate(C_undef(ll_t_out)), + val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)), layout: cast, }); } @@ -310,7 +307,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let discr_val = def .discriminant_for_variant(bx.cx.tcx, index) .val; - let discr = C_uint_big(ll_t_out, discr_val); + let discr = CodegenCx::c_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -341,7 +338,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { base::call_assume(&bx, bx.icmp( IntPredicate::IntULE, llval, - C_uint_big(ll_t_in, *scalar.valid_range.end()) + CodegenCx::c_uint_big(ll_t_in, *scalar.valid_range.end()) )); } } @@ -492,7 +489,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { 
mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bx.cx.type_is_sized(ty)); - let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes()); + let val = CodegenCx::c_usize(bx.cx, bx.cx.size_of(ty).bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -503,8 +500,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let (size, align) = bx.cx.size_and_align_of(content_ty); - let llsize = C_usize(bx.cx, size.bytes()); - let llalign = C_usize(bx.cx, align.abi()); + let llsize = CodegenCx::c_usize(bx.cx, size.bytes()); + let llalign = CodegenCx::c_usize(bx.cx, align.abi()); let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty)); let llty_ptr = box_layout.llvm_type(bx.cx); @@ -551,7 +548,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { let n = n.unwrap_usize(bx.cx.tcx); - return common::C_usize(bx.cx, n); + return CodegenCx::c_usize(bx.cx, n); } } } @@ -609,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - C_bool(bx.cx, match op { + CodegenCx::c_bool(bx.cx, match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -688,7 +685,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // while the current crate doesn't use overflow checks. 
if !bx.cx.check_overflow { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bx.cx, false)); + return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx, false)); } let (val, of) = match op { @@ -707,12 +704,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.extract_value(res, 1)) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); + let lhs_llty = CodegenCx::val_ty(lhs); + let rhs_llty = CodegenCx::val_ty(rhs); let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(IntPredicate::IntNE, outer_bits, C_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, CodegenCx::c_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -839,9 +836,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = CodegenCx::c_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) } else { @@ -910,8 +907,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, } let float_bits_to_llval = |bits| { let bits_llval = match float_ty.float_width() { - 32 => C_u32(bx.cx, bits as u32), - 64 => C_u64(bx.cx, bits as u64), + 32 => CodegenCx::c_u32(bx.cx, bits as u32), + 64 => CodegenCx::c_u64(bx.cx, bits as u64), n => bug!("unsupported float width {}", n), }; consts::bitcast(bits_llval, float_ty) @@ -966,8 +963,8 @@ fn 
cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); + let int_max = CodegenCx::c_uint_big(int_ty, int_max(signed, int_ty)); + let int_min = CodegenCx::c_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -976,7 +973,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // Therefore we only need to execute this step for signed integer types. if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, C_uint(int_ty, 0)) + bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, CodegenCx::c_uint(int_ty, 0)) } else { s1 } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index cfbd1b766674c..6dc38eeb0825d 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -323,7 +323,7 @@ impl Type { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, - TypeKind::FP128 | TypeKind::PPC_FP128 => 128, + TypeKind::FP128 | TypeKind::PPC_FP128 => 128, _ => bug!("llvm_float_width called on a non-float type") } } From 67e865f560ccf35ddb6acef30716a4d6b9c02cc1 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 28 Aug 2018 17:50:57 +0200 Subject: [PATCH 22/76] Replaced Codegen field access by trait method --- src/librustc_codegen_llvm/abi.rs | 4 +- src/librustc_codegen_llvm/asm.rs | 12 +-- src/librustc_codegen_llvm/base.rs | 38 ++++----- src/librustc_codegen_llvm/debuginfo/gdb.rs | 6 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 2 +- .../debuginfo/source_loc.rs | 6 +-
src/librustc_codegen_llvm/glue.rs | 18 ++-- src/librustc_codegen_llvm/intrinsic.rs | 82 +++++++++--------- src/librustc_codegen_llvm/meth.rs | 8 +- src/librustc_codegen_llvm/mir/block.rs | 76 ++++++++--------- src/librustc_codegen_llvm/mir/constant.rs | 10 +-- src/librustc_codegen_llvm/mir/mod.rs | 18 ++-- src/librustc_codegen_llvm/mir/operand.rs | 54 ++++++------ src/librustc_codegen_llvm/mir/place.rs | 74 ++++++++-------- src/librustc_codegen_llvm/mir/rvalue.rs | 84 +++++++++---------- 15 files changed, 246 insertions(+), 246 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 9b98888ddc37b..be769a5367bba 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -202,7 +202,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { if self.is_ignore() { return; } - let cx = bx.cx; + let cx = bx.cx(); if self.is_sized_indirect() { OperandValue::Ref(val, None, self.layout.align).store(bx, dst) } else if self.is_unsized_indirect() { @@ -721,7 +721,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { // by the LLVM verifier. if let layout::Int(..) 
= scalar.value { if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx.cx); + let range = scalar.valid_range_exclusive(bx.cx()); if range.start != range.end { bx.range_metadata(callsite, range); } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index d9424293689cd..eb4ea9c24893a 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -44,7 +44,7 @@ pub fn codegen_inline_asm( if out.is_indirect { indirect_outputs.push(place.load(bx).immediate()); } else { - output_types.push(place.layout.llvm_type(bx.cx)); + output_types.push(place.layout.llvm_type(bx.cx())); } } if !indirect_outputs.is_empty() { @@ -76,9 +76,9 @@ pub fn codegen_inline_asm( // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => Type::void(bx.cx), + 0 => Type::void(bx.cx()), 1 => output_types[0], - _ => Type::struct_(bx.cx, &output_types, false) + _ => Type::struct_(bx.cx(), &output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); @@ -108,13 +108,13 @@ pub fn codegen_inline_asm( // back to source locations. See #17552. 
unsafe { let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, + let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = CodegenCx::c_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = CodegenCx::c_i32(bx.cx(), ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); + llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1)); } return true; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index f01aade991e60..e5e2f37d84688 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -234,24 +234,24 @@ pub fn unsize_thin_ptr( &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to(); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to(); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { assert_eq!(def_a, def_b); - let src_layout = bx.cx.layout_of(src_ty); - let dst_layout = bx.cx.layout_of(dst_ty); + let src_layout = bx.cx().layout_of(src_ty); + let dst_layout = bx.cx().layout_of(dst_ty); let mut result = None; for i in 
0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx, i); + let src_f = src_layout.field(bx.cx(), i); assert_eq!(src_layout.fields.offset(i).bytes(), 0); assert_eq!(dst_layout.fields.offset(i).bytes(), 0); if src_f.is_zst() { @@ -259,15 +259,15 @@ pub fn unsize_thin_ptr( } assert_eq!(src_layout.size, src_f.size); - let dst_f = dst_layout.field(bx.cx, i); + let dst_f = dst_layout.field(bx.cx(), i); assert_ne!(src_f.ty, dst_f.ty); assert_eq!(result, None); result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); } let (lldata, llextra) = result.unwrap(); // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)), - bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true))) + (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 0, true)), + bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 1, true))) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -289,8 +289,8 @@ pub fn coerce_unsized_into( // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. 
- let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR); - (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info) + let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR); + (bx.pointercast(base, thin_ptr.llvm_type(bx.cx())), info) } OperandValue::Immediate(base) => { unsize_thin_ptr(bx, base, src_ty, dst_ty) @@ -385,7 +385,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { } pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { - let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); + let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); bx.call(assume_intrinsic, &[val], None); } @@ -417,7 +417,7 @@ pub fn to_immediate_scalar( scalar: &layout::Scalar, ) -> &'ll Value { if scalar.is_bool() { - return bx.trunc(val, Type::i1(bx.cx)); + return bx.trunc(val, Type::i1(bx.cx())); } val } @@ -473,10 +473,10 @@ pub fn call_memset( align: &'ll Value, volatile: bool, ) -> &'ll Value { - let ptr_width = &bx.cx.sess().target.target.target_pointer_width; + let ptr_width = &bx.cx().sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); - let volatile = CodegenCx::c_bool(bx.cx, volatile); + let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key); + let volatile = CodegenCx::c_bool(bx.cx(), volatile); bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index d5b6e5e4af05c..01cf92bbd0f7c 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -26,11 +26,11 @@ use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. 
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll, '_, &'ll Value>) { - if needs_gdb_debug_scripts_section(bx.cx) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); + if needs_gdb_debug_scripts_section(bx.cx()) { + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [CodegenCx::c_i32(bx.cx, 0), CodegenCx::c_i32(bx.cx, 0)]; + let indices = [CodegenCx::c_i32(bx.cx(), 0), CodegenCx::c_i32(bx.cx(), 0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 87ffde0934be5..50f1e52b5bd88 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -494,7 +494,7 @@ pub fn declare_local( span: Span, ) { assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); - let cx = bx.cx; + let cx = bx.cx(); let file = span_start(cx, span).file; let file_metadata = file_metadata(cx, diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index c6698f2451a12..3f5c13ba1421c 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -43,7 +43,7 @@ pub fn set_source_location( let dbg_loc = if function_debug_context.source_locations_enabled.get() { debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); - let loc = span_start(bx.cx, span); + let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { UnknownLocation @@ -89,7 +89,7 @@ pub fn set_debug_location( // For MSVC, set the column number to zero. 
// Otherwise, emit it. This mimics clang behaviour. // See discussion in https://github.com/rust-lang/rust/issues/42921 - let col_used = if bx.cx.sess().target.target.options.is_like_msvc { + let col_used = if bx.cx().sess().target.target.options.is_like_msvc { UNKNOWN_COLUMN_NUMBER } else { col as c_uint @@ -98,7 +98,7 @@ pub fn set_debug_location( unsafe { Some(llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(bx.cx).llcontext, + debug_context(bx.cx()).llcontext, line as c_uint, col_used, scope, diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 0efaafef0b89b..c667983e1e3dd 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -30,12 +30,12 @@ pub fn size_and_align_of_dst( ) -> (&'ll Value, &'ll Value) { debug!("calculate size of DST: {}; with lost info: {:?}", t, info); - if bx.cx.type_is_sized(t) { - let (size, align) = bx.cx.size_and_align_of(t); + if bx.cx().type_is_sized(t) { + let (size, align) = bx.cx().size_and_align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = CodegenCx::c_usize(bx.cx, size.bytes()); - let align = CodegenCx::c_usize(bx.cx, align.abi()); + let size = CodegenCx::c_usize(bx.cx(), size.bytes()); + let align = CodegenCx::c_usize(bx.cx(), align.abi()); return (size, align); } match t.sty { @@ -48,12 +48,12 @@ pub fn size_and_align_of_dst( let unit = t.sequence_element_type(bx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. 
- let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx, size.bytes())), - CodegenCx::c_usize(bx.cx, align.abi())) + let (size, align) = bx.cx().size_and_align_of(unit); + (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx(), size.bytes())), + CodegenCx::c_usize(bx.cx(), align.abi())) } _ => { - let cx = bx.cx; + let cx = bx.cx(); // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we // want to avoid, as the unsized field's alignment could be smaller. @@ -116,7 +116,7 @@ pub fn size_and_align_of_dst( // // `(size + (align-1)) & -align` - let addend = bx.sub(align, CodegenCx::c_usize(bx.cx, 1)); + let addend = bx.sub(align, CodegenCx::c_usize(bx.cx(), 1)); let size = bx.and(bx.add(size, addend), bx.neg(align)); (size, align) diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 3a55f7b2045db..a270daa13aa37 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -97,7 +97,7 @@ pub fn codegen_intrinsic_call( llresult: &'ll Value, span: Span, ) { - let cx = bx.cx; + let cx = bx.cx(); let tcx = cx.tcx; let (def_id, substs) = match callee_ty.sty { @@ -209,7 +209,7 @@ pub fn codegen_intrinsic_call( "needs_drop" => { let tp_ty = substs.type_at(0); - CodegenCx::c_bool(cx, bx.cx.type_needs_drop(tp_ty)) + CodegenCx::c_bool(cx, bx.cx().type_needs_drop(tp_ty)) } "offset" => { let ptr = args[0].immediate(); @@ -265,12 +265,12 @@ pub fn codegen_intrinsic_call( to_immediate(bx, load, cx.layout_of(tp_ty)) }, "volatile_store" => { - let dst = args[0].deref(bx.cx); + let dst = args[0].deref(bx.cx()); args[1].val.volatile_store(bx, dst); return; }, "unaligned_volatile_store" => { - let dst = args[0].deref(bx.cx); + let dst = args[0].deref(bx.cx()); args[1].val.unaligned_volatile_store(bx, dst); return; }, @@ -300,12 +300,12 @@ pub fn codegen_intrinsic_call( Some((width, signed)) => 
match name { "ctlz" | "cttz" => { - let y = CodegenCx::c_bool(bx.cx, false); + let y = CodegenCx::c_bool(bx.cx(), false); let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); bx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { - let y = CodegenCx::c_bool(bx.cx, true); + let y = CodegenCx::c_bool(bx.cx(), true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = cx.get_intrinsic(llvm_name); bx.call(llfn, &[args[0].immediate(), y], None) @@ -328,7 +328,7 @@ pub fn codegen_intrinsic_call( let intrinsic = format!("llvm.{}{}.with.overflow.i{}", if signed { 's' } else { 'u' }, &name[..3], width); - let llfn = bx.cx.get_intrinsic(&intrinsic); + let llfn = bx.cx().get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter let pair = bx.call(llfn, &[ @@ -408,7 +408,7 @@ pub fn codegen_intrinsic_call( }, "discriminant_value" => { - args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty) + args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty) } name if name.starts_with("simd_") => { @@ -472,7 +472,7 @@ pub fn codegen_intrinsic_call( failorder, weak); let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx)); + let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx())); let dest = result.project_field(bx, 0); bx.store(val, dest.llval, dest.align); @@ -543,7 +543,7 @@ pub fn codegen_intrinsic_call( } "nontemporal_store" => { - let dst = args[0].deref(bx.cx); + let dst = args[0].deref(bx.cx()); args[1].val.nontemporal_store(bx, dst); return; } @@ -614,7 +614,7 @@ pub fn codegen_intrinsic_call( // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. 
- assert!(!bx.cx.type_needs_drop(arg.layout.ty)); + assert!(!bx.cx().type_needs_drop(arg.layout.ty)); let (ptr, align) = match arg.val { OperandValue::Ref(ptr, None, align) => (ptr, align), _ => bug!() @@ -625,11 +625,11 @@ pub fn codegen_intrinsic_call( }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); vec![ bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64)) @@ -639,7 +639,7 @@ pub fn codegen_intrinsic_call( // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. - vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))] + vec![bx.trunc(arg.immediate(), Type::ix(bx.cx(), llvm_width as u64))] } _ => vec![arg.immediate()], } @@ -703,7 +703,7 @@ fn copy_intrinsic( src: &'ll Value, count: &'ll Value, ) -> &'ll Value { - let cx = bx.cx; + let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); let size = CodegenCx::c_usize(cx, size.bytes()); let align = CodegenCx::c_i32(cx, align.abi() as i32); @@ -738,7 +738,7 @@ fn memset_intrinsic( val: &'ll Value, count: &'ll Value ) -> &'ll Value { - let cx = bx.cx; + let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); let size = CodegenCx::c_usize(cx, size.bytes()); let align = CodegenCx::c_i32(cx, align.abi() as i32); @@ -757,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(CodegenCx::c_null(Type::i8p(&bx.cx)), dest, ptr_align); + bx.store(CodegenCx::c_null(Type::i8p(&bx.cx())), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, 
cx, func, data, local_ptr, dest); } else { @@ -781,9 +781,9 @@ fn codegen_msvc_try( dest: &'ll Value, ) { let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let cx = bx.cx(); - bx.set_personality_fn(bx.cx.eh_personality()); + bx.set_personality_fn(bx.cx().eh_personality()); let normal = bx.build_sibling_block("normal"); let catchswitch = bx.build_sibling_block("catchswitch"); @@ -890,7 +890,7 @@ fn codegen_gnu_try( dest: &'ll Value, ) { let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let cx = bx.cx(); // Codegens the shims described above: // @@ -925,7 +925,7 @@ fn codegen_gnu_try( // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); - let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1); + let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx))); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; @@ -1119,7 +1119,7 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(CodegenCx::c_i32(bx.cx, idx as i32)), + Some(idx) => Some(CodegenCx::c_i32(bx.cx(), idx as i32)), } }) .collect(); @@ -1161,7 +1161,7 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = Type::i1(bx.cx); + let i1 = Type::i1(bx.cx()); let i1xn = Type::vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); @@ -1223,7 +1223,7 @@ fn generic_simd_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); - let intrinsic = bx.cx.get_intrinsic(&llvm_name); + let intrinsic = bx.cx().get_intrinsic(&llvm_name); let c = bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::>(), None); @@ -1380,27 
+1380,27 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = Type::i32(bx.cx()); + let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); + let i1 = Type::i1(bx.cx()); let i1xn = Type::vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, + let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, Type::func(&[ llvm_pointer_vec_ty, alignment_ty, @@ -1480,29 +1480,29 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = Type::i32(bx.cx()); + let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); + let i1 = Type::i1(bx.cx()); let i1xn = Type::vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let 
ret_t = Type::void(bx.cx); + let ret_t = Type::void(bx.cx()); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, + let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, Type::func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, @@ -1559,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => CodegenCx::c_undef(Type::f32(bx.cx)), - 64 => CodegenCx::c_undef(Type::f64(bx.cx)), + 32 => CodegenCx::c_undef(Type::f32(bx.cx())), + 64 => CodegenCx::c_undef(Type::f64(bx.cx())), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1637,7 +1637,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = Type::i1(bx.cx); + let i1 = Type::i1(bx.cx()); let i1xn = Type::vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; @@ -1648,7 +1648,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, Type::bool(bx.cx)) + bx.zext(r, Type::bool(bx.cx())) } ) }, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 318e05f434ae7..8c08428f83224 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ 
b/src/librustc_codegen_llvm/meth.rs @@ -41,10 +41,10 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to()); + let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx()).ptr_to().ptr_to()); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( - bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]), + bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]), ptr_align ); bx.nonnull_metadata(ptr); @@ -61,10 +61,10 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); + let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to()); let usize_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( - bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]), + bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]), usize_align ); // Vtable loads are invariant diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 36851523cd28b..9ce8887045570 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -177,7 +177,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); } else { - bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle); + bx.call(bx.cx().eh_unwind_resume(), &[lp0], cleanup_bundle); bx.unreachable(); } } @@ -185,7 +185,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::TerminatorKind::Abort => { // Call core::intrinsics::abort() - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); bx.unreachable(); } @@ -209,7 +209,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { 
bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); let llval = CodegenCx::c_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); @@ -219,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let switch = bx.switch(discr.immediate(), llblock(self, *otherwise), values.len()); - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); for (&value, target) in values.iter().zip(targets) { let llval = CodegenCx::c_uint_big(switch_llty, value); let llbb = llblock(self, *target); @@ -269,7 +269,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; bx.load( - bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()), + bx.pointercast(llslot, cast_ty.llvm_type(bx.cx()).ptr_to()), self.fn_ty.ret.layout.align) } }; @@ -283,7 +283,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty); + let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx, ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. @@ -302,20 +302,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) 
=> { - let fn_ty = drop_fn.ty(bx.cx.tcx); - let sig = common::ty_fn_sig(bx.cx, fn_ty); + let fn_ty = drop_fn.ty(bx.cx().tcx); + let sig = common::ty_fn_sig(bx.cx(), fn_ty); let sig = bx.tcx().normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - let fn_ty = FnType::new_vtable(bx.cx, sig, &[]); + let fn_ty = FnType::new_vtable(bx.cx(), sig, &[]); let vtable = args[1]; args = &args[..1]; (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) } _ => { - (callee::get_fn(bx.cx, drop_fn), - FnType::of_instance(bx.cx, &drop_fn)) + (callee::get_fn(bx.cx(), drop_fn), + FnType::of_instance(bx.cx(), &drop_fn)) } }; do_call(self, bx, fn_ty, drop_fn, args, @@ -334,7 +334,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. - if !bx.cx.check_overflow { + if !bx.cx().check_overflow { if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { const_cond = Some(expected); } @@ -347,8 +347,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } // Pass the condition through llvm.expect for branch hinting. - let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx, expected)], None); + let expect = bx.cx().get_intrinsic(&"llvm.expect.i1"); + let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx(), expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -366,9 +366,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Get the location information. 
let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = CodegenCx::c_str_slice(bx.cx, filename); - let line = CodegenCx::c_u32(bx.cx, loc.line as u32); - let col = CodegenCx::c_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = CodegenCx::c_str_slice(bx.cx(), filename); + let line = CodegenCx::c_u32(bx.cx(), loc.line as u32); + let col = CodegenCx::c_u32(bx.cx(), loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -379,9 +379,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = CodegenCx::c_struct(bx.cx, + let file_line_col = CodegenCx::c_struct(bx.cx(), &[filename, line, col], false); - let file_line_col = consts::addr_of(bx.cx, + let file_line_col = consts::addr_of(bx.cx(), file_line_col, align, Some("panic_bounds_check_loc")); @@ -391,13 +391,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = CodegenCx::c_str_slice(bx.cx, msg_str); + let msg_str = CodegenCx::c_str_slice(bx.cx(), msg_str); let msg_file_line_col = CodegenCx::c_struct( - bx.cx, + bx.cx(), &[msg_str, filename, line, col], false ); - let msg_file_line_col = consts::addr_of(bx.cx, + let msg_file_line_col = consts::addr_of(bx.cx(), msg_file_line_col, align, Some("panic_loc")); @@ -409,8 +409,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Obtain the panic entry point. 
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = FnType::of_instance(bx.cx(), &instance); + let llfn = callee::get_fn(bx.cx(), instance); // Codegen the actual panic invoke/call. do_call(self, bx, fn_ty, llfn, &args, None, cleanup); @@ -432,7 +432,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.cx.tcx, + (Some(ty::Instance::resolve(bx.cx().tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap()), @@ -471,7 +471,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // we can do what we like. Here, we declare that transmuting // into an uninhabited type is impossible, so anything following // it must be unreachable. - assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited); bx.unreachable(); } return; @@ -485,7 +485,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let fn_ty = match def { Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bx.cx, sig, &extra_args) + FnType::new_vtable(bx.cx(), sig, &extra_args) } Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. 
@@ -493,7 +493,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { funclet_br(self, bx, target); return; } - _ => FnType::new(bx.cx, sig, &extra_args) + _ => FnType::new(bx.cx(), sig, &extra_args) }; // emit a panic instead of instantiating an uninhabited type @@ -563,7 +563,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to()) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -597,7 +597,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty), + layout: bx.cx().layout_of(ty), }; }, @@ -615,7 +615,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty) + layout: bx.cx().layout_of(ty) }; } } @@ -625,7 +625,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }).collect(); - let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx); + let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx); codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, terminator.source_info.span); @@ -682,7 +682,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bx.cx, instance), + (None, Some(instance)) => callee::get_fn(bx.cx(), instance), _ => span_bug!(span, "no llfn for call"), }; @@ -704,7 +704,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { arg: &ArgType<'tcx, Ty<'tcx>>) { // Fill padding with undef value, where applicable. 
if let Some(ty) = arg.pad { - llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx))); + llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx()))); } if arg.is_ignore() { @@ -763,7 +763,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()), + llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx()).ptr_to()), align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument @@ -814,7 +814,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { &mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value> ) -> PlaceRef<'tcx, &'ll Value> { - let cx = bx.cx; + let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { @@ -951,7 +951,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), LocalRef::Operand(None) => { - let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst)); + let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst)); assert!(!dst_layout.ty.has_erasable_regions()); let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); place.storage_live(bx); @@ -975,7 +975,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { src: &mir::Operand<'tcx>, dst: PlaceRef<'tcx, &'ll Value>) { let src = self.codegen_operand(bx, src); - let llty = src.layout.llvm_type(bx.cx); + let llty = src.layout.llvm_type(bx.cx()); let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 2797f78dd2922..b373f606da7e4 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ 
b/src/librustc_codegen_llvm/mir/constant.rs @@ -195,20 +195,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { c, )?; if let Some(prim) = field.val.try_to_scalar() { - let layout = bx.cx.layout_of(field_ty); + let layout = bx.cx().layout_of(field_ty); let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; Ok(scalar_to_llvm( - bx.cx, prim, scalar, - layout.immediate_llvm_type(bx.cx), + bx.cx(), prim, scalar, + layout.immediate_llvm_type(bx.cx()), )) } else { bug!("simd shuffle field {:?}", field) } }).collect(); - let llval = CodegenCx::c_struct(bx.cx, &values?, false); + let llval = CodegenCx::c_struct(bx.cx(), &values?, false); Ok((llval, c.ty)) }) .unwrap_or_else(|e| { @@ -218,7 +218,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ); // We've errored, so we don't have to produce working code. let ty = self.monomorphize(&ty); - let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); + let llty = bx.cx().layout_of(ty).llvm_type(bx.cx()); (CodegenCx::c_undef(llty), ty) }) } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 87e961ec29f1a..12eae8a9155bb 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -274,7 +274,7 @@ pub fn codegen_mir( let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty)); + let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty)); assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { @@ -284,7 +284,7 @@ pub fn codegen_mir( if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx, layout); + return LocalRef::new_operand(bx.cx(), layout); } debug!("alloc: {:?} ({}) -> place", local, name); @@ -326,7 +326,7 @@ pub fn codegen_mir( // alloca in advance. 
Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx, layout) + LocalRef::new_operand(bx.cx(), layout) } } }; @@ -419,8 +419,8 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = CodegenCx::c_null(Type::i8p(bx.cx)); - let sixty_four = CodegenCx::c_i32(bx.cx, 64); + let null = CodegenCx::c_null(Type::i8p(bx.cx())); + let sixty_four = CodegenCx::c_i32(bx.cx(), 64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } @@ -479,7 +479,7 @@ fn arg_local_refs( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; @@ -522,7 +522,7 @@ fn arg_local_refs( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { - return local(OperandRef::new_zst(bx.cx, arg.layout)); + return local(OperandRef::new_zst(bx.cx(), arg.layout)); } PassMode::Direct(_) => { let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); @@ -601,7 +601,7 @@ fn arg_local_refs( // Or is it the closure environment? let (closure_layout, env_ref) = match arg.layout.ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. 
}) | - ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true), + ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true), _ => (arg.layout, false) }; @@ -623,7 +623,7 @@ fn arg_local_refs( let env_alloca = !env_ref && unsafe { llvm::LLVMRustVersionMajor() < 6 }; let env_ptr = if env_alloca { let scratch = PlaceRef::alloca(bx, - bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)), "__debuginfo_env_ptr"); bx.store(place.llval, scratch.llval, scratch.align); scratch.llval diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 4397fb4447940..3c43e4cc271e2 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -82,10 +82,10 @@ impl OperandRef<'tcx, &'ll Value> { pub fn from_const(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, val: &'tcx ty::Const<'tcx>) -> Result, Lrc>> { - let layout = bx.cx.layout_of(val.ty); + let layout = bx.cx().layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx, layout)); + return Ok(OperandRef::new_zst(bx.cx(), layout)); } let val = match val.val { @@ -96,10 +96,10 @@ impl OperandRef<'tcx, &'ll Value> { _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; let llval = scalar_to_llvm( - bx.cx, + bx.cx(), x, scalar, - layout.immediate_llvm_type(bx.cx), + layout.immediate_llvm_type(bx.cx()), ); OperandValue::Immediate(llval) }, @@ -109,14 +109,14 @@ impl OperandRef<'tcx, &'ll Value> { _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; let a_llval = scalar_to_llvm( - bx.cx, + bx.cx(), a, a_scalar, - layout.scalar_pair_element_llvm_type(bx.cx, 0, true), + layout.scalar_pair_element_llvm_type(bx.cx(), 0, true), ); - let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true); + let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true); let b_llval = scalar_to_llvm( - bx.cx, + bx.cx(), b, b_scalar, b_layout, @@ -164,7 +164,7 @@ impl OperandRef<'tcx, &'ll 
Value> { /// For other cases, see `immediate`. pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> &'ll Value { if let OperandValue::Pair(a, b) = self.val { - let llty = self.layout.llvm_type(bx.cx); + let llty = self.layout.llvm_type(bx.cx()); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. @@ -200,13 +200,13 @@ impl OperandRef<'tcx, &'ll Value> { &self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, i: usize ) -> OperandRef<'tcx, &'ll Value> { - let field = self.layout.field(bx.cx, i); + let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx, field); + return OperandRef::new_zst(bx.cx(), field); } // Newtype of a scalar, scalar pair or vector. @@ -219,12 +219,12 @@ impl OperandRef<'tcx, &'ll Value> { // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx)); + assert_eq!(field.size, a.value.size(bx.cx())); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx) - .abi_align(b.value.align(bx.cx))); - assert_eq!(field.size, b.value.size(bx.cx)); + assert_eq!(offset, a.value.size(bx.cx()) + .abi_align(b.value.align(bx.cx()))); + assert_eq!(field.size, b.value.size(bx.cx())); OperandValue::Immediate(b_llval) } } @@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. 
}) => { OperandValue::Immediate( - bx.extract_element(llval, CodegenCx::c_usize(bx.cx, i as u64))) + bx.extract_element(llval, CodegenCx::c_usize(bx.cx(), i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -241,11 +241,11 @@ impl OperandRef<'tcx, &'ll Value> { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. match val { OperandValue::Immediate(ref mut llval) => { - *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx)); + *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx())); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true)); - *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true)); + *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true)); + *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true)); } OperandValue::Ref(..) => bug!() } @@ -349,7 +349,7 @@ impl OperandValue<&'ll Value> { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(Type::i8(bx.cx()), llsize, "unsized_tmp", max_align); base::call_memcpy(bx, lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. @@ -394,9 +394,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. 
- let elem = o.layout.field(bx.cx, 0); + let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx, elem)); + return Some(OperandRef::new_zst(bx.cx(), elem)); } } _ => {} @@ -415,11 +415,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx.layout_of(ty); + let layout = bx.cx().layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx, layout); + return OperandRef::new_zst(bx.cx(), layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -455,12 +455,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ); // Allow RalfJ to sleep soundly knowing that even refactorings that remove // the above error (or silence it under some conditions) will not cause UB - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); // We've errored, so we don't have to produce working code. 
- let layout = bx.cx.layout_of(ty); + let layout = bx.cx().layout_of(ty); PlaceRef::new_sized( - CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()), + CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 624aa4e85caa7..49cd9532b66b7 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -64,15 +64,15 @@ impl PlaceRef<'tcx, &'ll Value> { alloc: &mir::interpret::Allocation, offset: Size, ) -> PlaceRef<'tcx, &'ll Value> { - let init = const_alloc_to_llvm(bx.cx, alloc); - let base_addr = consts::addr_of(bx.cx, init, layout.align, None); + let init = const_alloc_to_llvm(bx.cx(), alloc); + let base_addr = consts::addr_of(bx.cx(), init, layout.align, None); let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(bx.cx)), - &CodegenCx::c_usize(bx.cx, offset.bytes()), + consts::bitcast(base_addr, Type::i8p(bx.cx())), + &CodegenCx::c_usize(bx.cx(), offset.bytes()), 1, )}; - let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); + let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to()); PlaceRef::new_sized(llval, layout, alloc.align) } @@ -80,7 +80,7 @@ impl PlaceRef<'tcx, &'ll Value> { -> PlaceRef<'tcx, &'ll Value> { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); + let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align); Self::new_sized(tmp, layout, layout.align) } @@ -92,8 +92,8 @@ impl PlaceRef<'tcx, &'ll Value> { ) -> PlaceRef<'tcx, &'ll Value> { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx.layout_of(ptr_ty); + let 
ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty); + let ptr_layout = bx.cx().layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } @@ -116,14 +116,14 @@ impl PlaceRef<'tcx, &'ll Value> { assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); if self.layout.is_zst() { - return OperandRef::new_zst(bx.cx, self.layout); + return OperandRef::new_zst(bx.cx(), self.layout); } let scalar_load_metadata = |load, scalar: &layout::Scalar| { let vr = scalar.valid_range.clone(); match scalar.value { layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx.cx); + let range = scalar.valid_range_exclusive(bx.cx()); if range.start != range.end { bx.range_metadata(load, range); } @@ -160,7 +160,7 @@ impl PlaceRef<'tcx, &'ll Value> { let load = bx.load(llptr, self.align); scalar_load_metadata(load, scalar); if scalar.is_bool() { - bx.trunc(load, Type::i1(bx.cx)) + bx.trunc(load, Type::i1(bx.cx())) } else { load } @@ -178,7 +178,7 @@ impl PlaceRef<'tcx, &'ll Value> { self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, ix: usize ) -> PlaceRef<'tcx, &'ll Value> { - let cx = bx.cx; + let cx = bx.cx(); let field = self.layout.field(cx, ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); @@ -286,7 +286,7 @@ impl PlaceRef<'tcx, &'ll Value> { bx: &Builder<'a, 'll, 'tcx, &'ll Value>, cast_to: Ty<'tcx> ) -> &'ll Value { - let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); + let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx()); if self.layout.abi.is_uninhabited() { return CodegenCx::c_undef(cast_to); } @@ -294,7 +294,7 @@ impl PlaceRef<'tcx, &'ll Value> { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, - |def| def.discriminant_for_variant(bx.cx.tcx, index).val); + |def| def.discriminant_for_variant(bx.cx().tcx, index).val); return CodegenCx::c_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. 
} | @@ -322,7 +322,7 @@ impl PlaceRef<'tcx, &'ll Value> { niche_start, .. } => { - let niche_llty = discr.layout.immediate_llvm_type(bx.cx); + let niche_llty = discr.layout.immediate_llvm_type(bx.cx()); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { @@ -350,7 +350,7 @@ impl PlaceRef<'tcx, &'ll Value> { /// Set the discriminant for a new value of the given case of the given /// representation. pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) { - if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() { + if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -363,7 +363,7 @@ impl PlaceRef<'tcx, &'ll Value> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx), to), + CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx()), to), ptr.llval, ptr.align); } @@ -378,16 +378,16 @@ impl PlaceRef<'tcx, &'ll Value> { bx.sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. 
- let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); - let fill_byte = CodegenCx::c_u8(bx.cx, 0); + let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to()); + let fill_byte = CodegenCx::c_u8(bx.cx(), 0); let (size, align) = self.layout.size_and_align(); - let size = CodegenCx::c_usize(bx.cx, size.bytes()); - let align = CodegenCx::c_u32(bx.cx, align.abi() as u32); + let size = CodegenCx::c_usize(bx.cx(), size.bytes()); + let align = CodegenCx::c_u32(bx.cx(), align.abi() as u32); base::call_memset(bx, llptr, fill_byte, size, align, false); } let niche = self.project_field(bx, 0); - let niche_llty = niche.layout.immediate_llvm_type(bx.cx); + let niche_llty = niche.layout.immediate_llvm_type(bx.cx()); let niche_value = ((variant_index - *niche_variants.start()) as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. @@ -406,9 +406,9 @@ impl PlaceRef<'tcx, &'ll Value> { pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) -> PlaceRef<'tcx, &'ll Value> { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx, 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx(), 0), llindex]), llextra: None, - layout: self.layout.field(bx.cx, 0), + layout: self.layout.field(bx.cx(), 0), align: self.align } } @@ -416,10 +416,10 @@ impl PlaceRef<'tcx, &'ll Value> { pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) -> PlaceRef<'tcx, &'ll Value> { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx, variant_index); + downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. 
- let variant_ty = downcast.layout.llvm_type(bx.cx); + let variant_ty = downcast.layout.llvm_type(bx.cx()); downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); downcast @@ -441,7 +441,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { -> PlaceRef<'tcx, &'ll Value> { debug!("codegen_place(place={:?})", place); - let cx = bx.cx; + let cx = bx.cx(); let tcx = cx.tcx; if let mir::Place::Local(index) = *place { @@ -479,9 +479,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // and compile-time agree on values // With floats that won't always be true // so we generate an abort - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()); + let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -495,7 +495,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. 
- self.codegen_consume(bx, base).deref(bx.cx) + self.codegen_consume(bx, base).deref(bx.cx()) } mir::Place::Projection(ref projection) => { let cg_base = self.codegen_place(bx, &projection.base); @@ -514,33 +514,33 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = CodegenCx::c_usize(bx.cx, offset as u64); + let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = CodegenCx::c_usize(bx.cx, offset as u64); - let lllen = cg_base.len(bx.cx); + let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64); + let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - CodegenCx::c_usize(bx.cx, from as u64)); + CodegenCx::c_usize(bx.cx(), from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); - subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); + subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - CodegenCx::c_usize(bx.cx, (from as u64) + (to as u64)))); + CodegenCx::c_usize(bx.cx(), (from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bx.pointercast(subslice.llval, - subslice.layout.llvm_type(bx.cx).ptr_to()); + subslice.layout.llvm_type(bx.cx()).ptr_to()); subslice } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 12d69adbfd772..f78d919f4298b 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -103,28 +103,28 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return bx; } - let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx, 0)).llval; + let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx(), 0)).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = CodegenCx::c_i32(bx.cx, dest.align.abi() as i32); - let size = CodegenCx::c_usize(bx.cx, dest.layout.size.bytes()); + let align = CodegenCx::c_i32(bx.cx(), dest.align.abi() as i32); + let size = CodegenCx::c_usize(bx.cx(), dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 { - let fill = CodegenCx::c_u8(bx.cx, 0); + let fill = CodegenCx::c_u8(bx.cx(), 0); base::call_memset(&bx, start, fill, size, align, false); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); - if CodegenCx::val_ty(v) == Type::i8(bx.cx) { + if CodegenCx::val_ty(v) == Type::i8(bx.cx()) { base::call_memset(&bx, start, v, size, align, false); return bx; } } - let count = CodegenCx::c_usize(bx.cx, count); + let count = CodegenCx::c_usize(bx.cx(), count); let end = dest.project_index(&bx, count).llval; let header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -140,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { cg_elem.val.store(&body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx, 1)]); + let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx(), 1)]); 
body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -210,18 +210,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.codegen_operand(&bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") { + if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } OperandValue::Immediate( - callee::resolve_and_get_fn(bx.cx, def_id, substs)) + callee::resolve_and_get_fn(bx.cx(), def_id, substs)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.layout.ty) @@ -232,8 +232,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(callee::get_fn(bx.cx, instance)) + bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(callee::get_fn(bx.cx(), instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -256,7 +256,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. 
let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + cast.scalar_pair_element_llvm_type(bx.cx(), 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -275,12 +275,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let OperandValue::Pair(data_ptr, meta) = operand.val { if cast.is_llvm_scalar_pair() { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + cast.scalar_pair_element_llvm_type(bx.cx(), 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = cast.immediate_llvm_type(bx.cx); + let llcast_ty = cast.immediate_llvm_type(bx.cx()); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -290,7 +290,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } mir::CastKind::Misc => { assert!(cast.is_llvm_immediate()); - let ll_t_out = cast.immediate_llvm_type(bx.cx); + let ll_t_out = cast.immediate_llvm_type(bx.cx()); if operand.layout.abi.is_uninhabited() { return (bx, OperandRef { val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)), @@ -300,12 +300,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = operand.layout.immediate_llvm_type(bx.cx); + let ll_t_in = operand.layout.immediate_llvm_type(bx.cx()); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx.tcx, index) + .discriminant_for_variant(bx.cx().tcx, index) .val; let discr = CodegenCx::c_uint_big(ll_t_out, discr_val); return (bx, OperandRef { @@ -328,7 +328,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // 
then `i1 1` (i.e. E::B) is effectively `i8 -1`. signed = !scalar.is_bool() && s; - let er = scalar.valid_range_exclusive(bx.cx); + let er = scalar.valid_range_exclusive(bx.cx()); if er.end != er.start && scalar.valid_range.end() > scalar.valid_range.start() { // We want `table[e as usize]` to not @@ -367,7 +367,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed); + let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => @@ -394,7 +394,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. - let val = if !bx.cx.type_has_metadata(ty) { + let val = if !bx.cx().type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) @@ -412,7 +412,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let size = self.evaluate_array_len(&bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx.layout_of(bx.tcx().types.usize), + layout: bx.cx().layout_of(bx.tcx().types.usize), }; (bx, operand) } @@ -438,7 +438,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx.layout_of( + layout: bx.cx().layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) @@ -453,7 +453,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx.layout_of(operand_ty) + layout: bx.cx().layout_of(operand_ty) }; (bx, operand) @@ -488,8 +488,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - 
assert!(bx.cx.type_is_sized(ty)); - let val = CodegenCx::c_usize(bx.cx, bx.cx.size_of(ty).bytes()); + assert!(bx.cx().type_is_sized(ty)); + let val = CodegenCx::c_usize(bx.cx(), bx.cx().size_of(ty).bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -499,11 +499,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx.size_and_align_of(content_ty); - let llsize = CodegenCx::c_usize(bx.cx, size.bytes()); - let llalign = CodegenCx::c_usize(bx.cx, align.abi()); - let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = box_layout.llvm_type(bx.cx); + let (size, align) = bx.cx().size_and_align_of(content_ty); + let llsize = CodegenCx::c_usize(bx.cx(), size.bytes()); + let llalign = CodegenCx::c_usize(bx.cx(), align.abi()); + let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = box_layout.llvm_type(bx.cx()); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { @@ -513,7 +513,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = callee::get_fn(bx.cx, instance); + let r = callee::get_fn(bx.cx(), instance); let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); let operand = OperandRef { @@ -547,14 +547,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx.tcx); - return CodegenCx::c_usize(bx.cx, n); + let n = n.unwrap_usize(bx.cx().tcx); + return CodegenCx::c_usize(bx.cx(), n); } } } // use common size calculation for non zero-sized types let cg_value = self.codegen_place(&bx, place); - return cg_value.len(bx.cx); + return cg_value.len(bx.cx()); } pub fn 
codegen_scalar_binop( @@ -606,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - CodegenCx::c_bool(bx.cx, match op { + CodegenCx::c_bool(bx.cx(), match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -683,9 +683,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx.check_overflow { + if !bx.cx().check_overflow { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx, false)); + return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx(), false)); } let (val, of) = match op { @@ -817,7 +817,7 @@ fn get_overflow_intrinsic( }, }; - bx.cx.get_intrinsic(&name) + bx.cx().get_intrinsic(&name) } fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, @@ -838,7 +838,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, << (Single::MAX_EXP - Single::PRECISION as i16); let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = CodegenCx::c_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = CodegenCx::c_u32(bx.cx(), ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) } else { @@ -907,8 +907,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, } let float_bits_to_llval = |bits| { let bits_llval = match float_ty.float_width() { - 32 => CodegenCx::c_u32(bx.cx, bits as u32), - 64 => CodegenCx::c_u64(bx.cx, bits as u64), + 
32 => CodegenCx::c_u32(bx.cx(), bits as u32), + 64 => CodegenCx::c_u64(bx.cx(), bits as u64), n => bug!("unsupported float width {}", n), }; consts::bitcast(bits_llval, float_ty) From 1174b40a0a5d37cbf0062cd830ec282f17a684ea Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 29 Aug 2018 15:56:30 +0200 Subject: [PATCH 23/76] Added self argument for Codegen CommonMethod trait methods --- src/librustc_codegen_llvm/builder.rs | 6 +- src/librustc_codegen_llvm/common.rs | 26 +++---- .../interfaces/common.rs | 10 +-- src/librustc_codegen_llvm/intrinsic.rs | 70 +++++++++---------- src/librustc_codegen_llvm/meth.rs | 2 +- src/librustc_codegen_llvm/mir/block.rs | 10 +-- src/librustc_codegen_llvm/mir/constant.rs | 8 +-- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 8 +-- src/librustc_codegen_llvm/mir/place.rs | 50 ++++++------- src/librustc_codegen_llvm/mir/rvalue.rs | 47 +++++++------ 11 files changed, 120 insertions(+), 119 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index d85e8077aaa2e..250201b3224a6 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -549,8 +549,8 @@ impl BuilderMethods<'a, 'll, 'tcx> unsafe { let llty = CodegenCx::val_ty(load); let v = [ - CodegenCx::c_uint_big(llty, range.start), - CodegenCx::c_uint_big(llty, range.end) + self.cx.c_uint_big(llty, range.start), + self.cx.c_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -866,7 +866,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0)); let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, CodegenCx::c_null(vec_i32_ty)) + self.shuffle_vector(vec, undef, self.cx.c_null(vec_i32_ty)) } } diff --git 
a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 74a005985cd6f..b27b32efeb315 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -210,31 +210,31 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } // LLVM constant constructors. - fn c_null(t: &'ll Type) -> &'ll Value { + fn c_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } - fn c_undef(t: &'ll Type) -> &'ll Value { + fn c_undef(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMGetUndef(t) } } - fn c_int(t: &'ll Type, i: i64) -> &'ll Value { + fn c_int(&self, t: &'ll Type, i: i64) -> &'ll Value { unsafe { llvm::LLVMConstInt(t, i as u64, True) } } - fn c_uint(t: &'ll Type, i: u64) -> &'ll Value { + fn c_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { unsafe { llvm::LLVMConstInt(t, i, False) } } - fn c_uint_big(t: &'ll Type, u: u128) -> &'ll Value { + fn c_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { unsafe { let words = [u as u64, (u >> 64) as u64]; llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) @@ -242,19 +242,19 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn c_bool(&self, val: bool) -> &'ll Value { - Self::c_uint(Type::i1(&self), val as u64) + &self.c_uint(Type::i1(&self), val as u64) } fn c_i32(&self, i: i32) -> &'ll Value { - Self::c_int(Type::i32(&self), i as i64) + &self.c_int(Type::i32(&self), i as i64) } fn c_u32(&self, i: u32) -> &'ll Value { - Self::c_uint(Type::i32(&self), i as u64) + &self.c_uint(Type::i32(&self), i as u64) } fn c_u64(&self, i: u64) -> &'ll Value { - Self::c_uint(Type::i64(&self), i) + &self.c_uint(Type::i64(&self), i) } fn c_usize(&self, i: u64) -> &'ll Value { @@ -264,11 +264,11 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { assert!(i < (1< &'ll Value { - Self::c_uint(Type::i8(&self), i as u64) + &self.c_uint(Type::i8(&self), i as u64) } @@ -491,9 +491,9 @@ 
pub fn shift_mask_val( // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. let val = llty.int_width() - 1; if invert { - CodegenCx::c_int(mask_llty, !val as i64) + bx.cx.c_int(mask_llty, !val as i64) } else { - CodegenCx::c_uint(mask_llty, val) + bx.cx.c_uint(mask_llty, val) } }, TypeKind::Vector => { diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index c43e3b7504a12..fe587bd8f83ea 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -15,11 +15,11 @@ pub trait CommonMethods : Backend { fn val_ty(v: Self::Value) -> Self::Type; // Constant constructors - fn c_null(t: Self::Type) -> Self::Value; - fn c_undef(t: Self::Type) -> Self::Value; - fn c_int(t: Self::Type, i: i64) -> Self::Value; - fn c_uint(t: Self::Type, i: u64) -> Self::Value; - fn c_uint_big(t: Self::Type, u: u128) -> Self::Value; + fn c_null(&self, t: Self::Type) -> Self::Value; + fn c_undef(&self, t: Self::Type) -> Self::Value; + fn c_int(&self, t: Self::Type, i: i64) -> Self::Value; + fn c_uint(&self, t: Self::Type, i: u64) -> Self::Value; + fn c_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; fn c_bool(&self, val: bool) -> Self::Value; fn c_i32(&self, i: i32) -> Self::Value; fn c_u32(&self, i: u32) -> Self::Value; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index a270daa13aa37..ccd1829e288a7 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -126,11 +126,11 @@ pub fn codegen_intrinsic_call( }, "likely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, true)], None) + bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(true)], None) } "unlikely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, false)], None) + bx.call(expect, 
&[args[0].immediate(), bx.cx().c_bool(false)], None) } "try" => { try_intrinsic(bx, cx, @@ -146,7 +146,7 @@ pub fn codegen_intrinsic_call( } "size_of" => { let tp_ty = substs.type_at(0); - CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes()) + cx.c_usize(cx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); @@ -155,12 +155,12 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); llsize } else { - CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes()) + cx.c_usize(cx.size_of(tp_ty).bytes()) } } "min_align_of" => { let tp_ty = substs.type_at(0); - CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi()) + cx.c_usize(cx.align_of(tp_ty).abi()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -169,20 +169,20 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); llalign } else { - CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi()) + cx.c_usize(cx.align_of(tp_ty).abi()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - CodegenCx::c_usize(cx, cx.align_of(tp_ty).pref()) + cx.c_usize(cx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - CodegenCx::c_str_slice(cx, ty_name) + cx.c_str_slice(ty_name) } "type_id" => { - CodegenCx::c_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) + cx.c_u64(cx.tcx.type_id_hash(substs.type_at(0))) } "init" => { let ty = substs.type_at(0); @@ -196,8 +196,8 @@ pub fn codegen_intrinsic_call( false, ty, llresult, - CodegenCx::c_u8(cx, 0), - CodegenCx::c_usize(cx, 1) + cx.c_u8(0), + cx.c_usize(1) ); } return; @@ -209,7 +209,7 @@ pub fn codegen_intrinsic_call( "needs_drop" => { let tp_ty = substs.type_at(0); - CodegenCx::c_bool(cx, bx.cx().type_needs_drop(tp_ty)) + cx.c_bool(bx.cx().type_needs_drop(tp_ty)) } "offset" => { let ptr = args[0].immediate(); @@ -286,9 +286,9 @@ pub fn codegen_intrinsic_call( }; bx.call(expect, &[ args[0].immediate(), - CodegenCx::c_i32(cx, rw), + 
cx.c_i32(rw), args[1].immediate(), - CodegenCx::c_i32(cx, cache_type) + cx.c_i32(cache_type) ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | @@ -300,12 +300,12 @@ pub fn codegen_intrinsic_call( Some((width, signed)) => match name { "ctlz" | "cttz" => { - let y = CodegenCx::c_bool(bx.cx(), false); + let y = cx.c_bool(false); let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); bx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { - let y = CodegenCx::c_bool(bx.cx(), true); + let y = cx.c_bool(true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = cx.get_intrinsic(llvm_name); bx.call(llfn, &[args[0].immediate(), y], None) @@ -705,8 +705,8 @@ fn copy_intrinsic( ) -> &'ll Value { let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = CodegenCx::c_usize(cx, size.bytes()); - let align = CodegenCx::c_i32(cx, align.abi() as i32); + let size = cx.c_usize(size.bytes()); + let align = cx.c_i32(align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -726,7 +726,7 @@ fn copy_intrinsic( src_ptr, bx.mul(size, count), align, - CodegenCx::c_bool(cx, volatile)], + cx.c_bool(volatile)], None) } @@ -740,8 +740,8 @@ fn memset_intrinsic( ) -> &'ll Value { let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = CodegenCx::c_usize(cx, size.bytes()); - let align = CodegenCx::c_i32(cx, align.abi() as i32); + let size = cx.c_usize(size.bytes()); + let align = cx.c_i32(align.abi() as i32); let dst = bx.pointercast(dst, Type::i8p(cx)); call_memset(bx, dst, val, bx.mul(size, count), align, volatile) } @@ -757,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(CodegenCx::c_null(Type::i8p(&bx.cx())), dest, ptr_align); + bx.store(bx.cx().c_null(Type::i8p(&bx.cx())), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { 
codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { @@ -838,7 +838,7 @@ fn codegen_msvc_try( let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(CodegenCx::c_i32(cx, 0)); + normal.ret(cx.c_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); @@ -848,19 +848,19 @@ fn codegen_msvc_try( Some(did) => ::consts::get_static(cx, did), None => bug!("msvc_try_filter not defined"), }; - let tok = catchpad.catch_pad(cs, &[tydesc, CodegenCx::c_i32(cx, 0), slot]); + let tok = catchpad.catch_pad(cs, &[tydesc, cx.c_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); - let val1 = CodegenCx::c_i32(cx, 1); + let val1 = cx.c_i32(1); let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); catchpad.store(arg1, local_ptr, i64_align); catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); catchpad.catch_ret(tok, caught.llbb()); - caught.ret(CodegenCx::c_i32(cx, 1)); + caught.ret(cx.c_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -916,7 +916,7 @@ fn codegen_gnu_try( let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(CodegenCx::c_i32(cx, 0)); + then.ret(cx.c_i32(0)); // Type indicator for the exception being thrown. // @@ -926,11 +926,11 @@ fn codegen_gnu_try( // rust_try ignores the selector. 
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); - catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx))); + catch.add_clause(vals, bx.cx().c_null(Type::i8p(cx))); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); - catch.ret(CodegenCx::c_i32(cx, 1)); + catch.ret(cx.c_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -1119,13 +1119,13 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(CodegenCx::c_i32(bx.cx(), idx as i32)), + Some(idx) => Some(bx.cx().c_i32(idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(CodegenCx::c_null(llret_ty)) + None => return Ok(bx.cx().c_null(llret_ty)) }; return Ok(bx.shuffle_vector(args[0].immediate(), @@ -1381,7 +1381,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = Type::i32(bx.cx()); - let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32); + let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1481,7 +1481,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = Type::i32(bx.cx()); - let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32); + let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1559,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => CodegenCx::c_undef(Type::f32(bx.cx())), - 64 => CodegenCx::c_undef(Type::f64(bx.cx())), + 32 => bx.cx().c_undef(Type::f32(bx.cx())), + 64 => 
bx.cx().c_undef(Type::f64(bx.cx())), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 8c08428f83224..fc0a7c90e67f1 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -96,7 +96,7 @@ pub fn get_vtable( } // Not in the cache. Build it. - let nullptr = CodegenCx::c_null(Type::i8p(cx)); + let nullptr = cx.c_null(Type::i8p(cx)); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 9ce8887045570..a7e3f54c0209b 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -172,7 +172,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { slot.storage_dead(&bx); if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = CodegenCx::c_undef(self.landing_pad_type()); + let mut lp = bx.cx().c_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); @@ -210,7 +210,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } else { let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); - let llval = CodegenCx::c_uint_big(switch_llty, values[0]); + let llval = bx.cx().c_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } @@ -221,7 +221,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { values.len()); let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); for (&value, target) in values.iter().zip(targets) { - let llval = CodegenCx::c_uint_big(switch_llty, value); + let llval =bx.cx().c_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -563,7 +563,7 @@ impl FunctionCx<'a, 'll, 
'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to()) + bx.cx().c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to()) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -704,7 +704,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { arg: &ArgType<'tcx, Ty<'tcx>>) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx()))); + llargs.push(bx.cx().c_undef(ty.llvm_type(bx.cx()))); } if arg.is_ignore() { diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index b373f606da7e4..849aa94db3196 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -41,11 +41,11 @@ pub fn scalar_to_llvm( match cv { Scalar::Bits { size: 0, .. } => { assert_eq!(0, layout.value.size(cx).bytes()); - CodegenCx::c_undef(Type::ix(cx, 0)) + cx.c_undef(Type::ix(cx, 0)) }, Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = CodegenCx::c_uint_big(Type::ix(cx, bitsize), bits); + let llval = cx.c_uint_big(Type::ix(cx, bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { @@ -74,7 +74,7 @@ pub fn scalar_to_llvm( }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( consts::bitcast(base_addr, Type::i8p(cx)), - &CodegenCx::c_usize(cx, ptr.offset.bytes()), + &cx.c_usize(ptr.offset.bytes()), 1, ) }; if layout.value != layout::Pointer { @@ -219,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. 
let ty = self.monomorphize(&ty); let llty = bx.cx().layout_of(ty).llvm_type(bx.cx()); - (CodegenCx::c_undef(llty), ty) + (bx.cx().c_undef(llty), ty) }) } } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 12eae8a9155bb..dbfcf7d866e4d 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -419,7 +419,7 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = CodegenCx::c_null(Type::i8p(bx.cx())); + let null = bx.cx().c_null(Type::i8p(bx.cx())); let sixty_four = CodegenCx::c_i32(bx.cx(), 64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 3c43e4cc271e2..d9975adb2d0c4 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -74,7 +74,7 @@ impl OperandRef<'tcx, &'ll Value> { layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(CodegenCx::c_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.c_undef(layout.immediate_llvm_type(cx))), layout } } @@ -168,7 +168,7 @@ impl OperandRef<'tcx, &'ll Value> { debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = CodegenCx::c_undef(llty); + let mut llpair = bx.cx().c_undef(llty); llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); llpair @@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. 
}) => { OperandValue::Immediate( - bx.extract_element(llval, CodegenCx::c_usize(bx.cx(), i as u64))) + bx.extract_element(llval, bx.cx().c_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -460,7 +460,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. let layout = bx.cx().layout_of(ty); PlaceRef::new_sized( - CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()), + bx.cx().c_undef(layout.llvm_type(bx.cx()).ptr_to()), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 49cd9532b66b7..a5b5e73a5bb25 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -69,7 +69,7 @@ impl PlaceRef<'tcx, &'ll Value> { let llval = unsafe { LLVMConstInBoundsGEP( consts::bitcast(base_addr, Type::i8p(bx.cx())), - &CodegenCx::c_usize(bx.cx(), offset.bytes()), + &bx.cx().c_usize(offset.bytes()), 1, )}; let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to()); @@ -103,7 +103,7 @@ impl PlaceRef<'tcx, &'ll Value> { assert_eq!(count, 0); self.llextra.unwrap() } else { - CodegenCx::c_usize(cx, count) + cx.c_usize(count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) @@ -247,7 +247,7 @@ impl PlaceRef<'tcx, &'ll Value> { let meta = self.llextra; - let unaligned_offset = CodegenCx::c_usize(cx, offset.bytes()); + let unaligned_offset = cx.c_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -258,7 +258,7 @@ impl PlaceRef<'tcx, &'ll Value> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, CodegenCx::c_usize(cx, 1u64)); + let align_sub_1 = bx.sub(unsized_align, cx.c_usize(1u64)); let offset = bx.and(bx.add(unaligned_offset, align_sub_1), bx.neg(unsized_align)); @@ -288,14 +288,14 @@ impl 
PlaceRef<'tcx, &'ll Value> { ) -> &'ll Value { let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx()); if self.layout.abi.is_uninhabited() { - return CodegenCx::c_undef(cast_to); + return bx.cx().c_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, |def| def.discriminant_for_variant(bx.cx().tcx, index).val); - return CodegenCx::c_uint_big(cast_to, discr_val); + return bx.cx().c_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, @@ -327,21 +327,21 @@ impl PlaceRef<'tcx, &'ll Value> { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { // HACK(eddyb) Using `c_null` as it works on all types. - CodegenCx::c_null(niche_llty) + bx.cx().c_null(niche_llty) } else { - CodegenCx::c_uint_big(niche_llty, niche_start) + bx.cx().c_uint_big(niche_llty, niche_start) }; bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), - CodegenCx::c_uint(cast_to, *niche_variants.start() as u64), - CodegenCx::c_uint(cast_to, dataful_variant as u64)) + bx.cx().c_uint(cast_to, *niche_variants.start() as u64), + bx.cx().c_uint(cast_to, dataful_variant as u64)) } else { // Rebase from niche values to discriminant values. 
let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); - let lldiscr = bx.sub(lldiscr, CodegenCx::c_uint_big(niche_llty, delta)); - let lldiscr_max = CodegenCx::c_uint(niche_llty, *niche_variants.end() as u64); + let lldiscr = bx.sub(lldiscr, bx.cx().c_uint_big(niche_llty, delta)); + let lldiscr_max = bx.cx().c_uint(niche_llty, *niche_variants.end() as u64); bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), bx.intcast(lldiscr, cast_to, false), - CodegenCx::c_uint(cast_to, dataful_variant as u64)) + bx.cx().c_uint(cast_to, dataful_variant as u64)) } } } @@ -363,7 +363,7 @@ impl PlaceRef<'tcx, &'ll Value> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx()), to), + bx.cx().c_uint_big(ptr.layout.llvm_type(bx.cx()), to), ptr.llval, ptr.align); } @@ -379,10 +379,10 @@ impl PlaceRef<'tcx, &'ll Value> { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to()); - let fill_byte = CodegenCx::c_u8(bx.cx(), 0); + let fill_byte = bx.cx().c_u8(0); let (size, align) = self.layout.size_and_align(); - let size = CodegenCx::c_usize(bx.cx(), size.bytes()); - let align = CodegenCx::c_u32(bx.cx(), align.abi() as u32); + let size = bx.cx().c_usize(size.bytes()); + let align = bx.cx().c_u32(align.abi() as u32); base::call_memset(bx, llptr, fill_byte, size, align, false); } @@ -393,9 +393,9 @@ impl PlaceRef<'tcx, &'ll Value> { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- CodegenCx::c_null(niche_llty) + bx.cx().c_null(niche_llty) } else { - CodegenCx::c_uint_big(niche_llty, niche_value) + bx.cx().c_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -406,7 +406,7 @@ impl PlaceRef<'tcx, &'ll Value> { pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) -> PlaceRef<'tcx, &'ll Value> { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx(), 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.cx().c_usize(0), llindex]), llextra: None, layout: self.layout.field(bx.cx(), 0), align: self.align @@ -481,7 +481,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // so we generate an abort let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()); + let llval = bx.cx().c_undef(layout.llvm_type(bx.cx()).ptr_to()); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -514,27 +514,27 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64); + let lloffset = bx.cx().c_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64); + let lloffset = bx.cx().c_usize(offset as u64); let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - CodegenCx::c_usize(bx.cx(), from as u64)); + bx.cx().c_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { 
subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - CodegenCx::c_usize(bx.cx(), (from as u64) + (to as u64)))); + bx.cx().c_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index f78d919f4298b..133fd3d22eed2 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -103,15 +103,15 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return bx; } - let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx(), 0)).llval; + let start = dest.project_index(&bx, bx.cx().c_usize(0)).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = CodegenCx::c_i32(bx.cx(), dest.align.abi() as i32); - let size = CodegenCx::c_usize(bx.cx(), dest.layout.size.bytes()); + let align = bx.cx().c_i32(dest.align.abi() as i32); + let size = bx.cx().c_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 { - let fill = CodegenCx::c_u8(bx.cx(), 0); + let fill = bx.cx().c_u8(0); base::call_memset(&bx, start, fill, size, align, false); return bx; } @@ -124,7 +124,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - let count = CodegenCx::c_usize(bx.cx(), count); + let count = bx.cx().c_usize(count); let end = dest.project_index(&bx, count).llval; let header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -140,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { cg_elem.val.store(&body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx(), 1)]); + let next = body_bx.inbounds_gep(current, &[bx.cx().c_usize(1)]); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -292,8 +292,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { assert!(cast.is_llvm_immediate()); let ll_t_out 
= cast.immediate_llvm_type(bx.cx()); if operand.layout.abi.is_uninhabited() { + let val = OperandValue::Immediate(bx.cx().c_undef(ll_t_out)); return (bx, OperandRef { - val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)), + val, layout: cast, }); } @@ -307,7 +308,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let discr_val = def .discriminant_for_variant(bx.cx().tcx, index) .val; - let discr = CodegenCx::c_uint_big(ll_t_out, discr_val); + let discr = bx.cx().c_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -338,7 +339,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { base::call_assume(&bx, bx.icmp( IntPredicate::IntULE, llval, - CodegenCx::c_uint_big(ll_t_in, *scalar.valid_range.end()) + bx.cx().c_uint_big(ll_t_in, *scalar.valid_range.end()) )); } } @@ -489,7 +490,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bx.cx().type_is_sized(ty)); - let val = CodegenCx::c_usize(bx.cx(), bx.cx().size_of(ty).bytes()); + let val = bx.cx().c_usize(bx.cx().size_of(ty).bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -500,8 +501,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let (size, align) = bx.cx().size_and_align_of(content_ty); - let llsize = CodegenCx::c_usize(bx.cx(), size.bytes()); - let llalign = CodegenCx::c_usize(bx.cx(), align.abi()); + let llsize = bx.cx().c_usize(size.bytes()); + let llalign = bx.cx().c_usize(align.abi()); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); let llty_ptr = box_layout.llvm_type(bx.cx()); @@ -548,7 +549,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { let n = n.unwrap_usize(bx.cx().tcx); - return CodegenCx::c_usize(bx.cx(), n); + 
return bx.cx().c_usize(n); } } } @@ -606,7 +607,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - CodegenCx::c_bool(bx.cx(), match op { + bx.cx().c_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -685,7 +686,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // while the current crate doesn't use overflow checks. if !bx.cx().check_overflow { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx(), false)); + return OperandValue::Pair(val, bx.cx().c_bool(false)); } let (val, of) = match op { @@ -709,7 +710,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(IntPredicate::IntNE, outer_bits, CodegenCx::c_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().c_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -836,9 +837,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = bx.cx().c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = CodegenCx::c_u32(bx.cx(), ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = bx.cx().c_u32(ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) } else { @@ -907,8 +908,8 @@ 
fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, } let float_bits_to_llval = |bits| { let bits_llval = match float_ty.float_width() { - 32 => CodegenCx::c_u32(bx.cx(), bits as u32), - 64 => CodegenCx::c_u64(bx.cx(), bits as u64), + 32 => bx.cx().c_u32(bits as u32), + 64 => bx.cx().c_u64(bits as u64), n => bug!("unsupported float width {}", n), }; consts::bitcast(bits_llval, float_ty) @@ -963,8 +964,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = CodegenCx::c_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = CodegenCx::c_uint_big(int_ty, int_min(signed, int_ty) as u128); + let int_max = bx.cx().c_uint_big(int_ty, int_max(signed, int_ty)); + let int_min = bx.cx().c_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -973,7 +974,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // Therefore we only need to execute this step for signed integer types. 
if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, CodegenCx::c_uint(int_ty, 0)) + bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().c_uint(int_ty, 0)) } else { s1 } From a054141f20f0546648fe3c3296d5023ebd1d8fe3 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 29 Aug 2018 16:40:47 +0200 Subject: [PATCH 24/76] Split CommonMethods to accomodate for use in back/write.rs --- src/librustc_codegen_llvm/back/write.rs | 2 +- src/librustc_codegen_llvm/base.rs | 2 +- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_llvm/callee.rs | 2 +- src/librustc_codegen_llvm/common.rs | 29 ++++++++++--------- src/librustc_codegen_llvm/consts.rs | 2 +- .../interfaces/common.rs | 10 ++++--- src/librustc_codegen_llvm/interfaces/mod.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- 9 files changed, 29 insertions(+), 24 deletions(-) diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 171c0de3f2724..ea28c0f5f0108 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -46,7 +46,7 @@ use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; use type_::Type; use context::{is_pie_binary, get_reloc_model, CodegenCx}; -use interfaces::CommonMethods; +use interfaces::CommonWriteMethods; use jobserver::{Client, Acquired}; use rustc_demangle; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index e5e2f37d84688..ba8fcc78a1f20 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -74,7 +74,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods}; use std::any::Any; use std::ffi::CString; diff --git a/src/librustc_codegen_llvm/builder.rs 
b/src/librustc_codegen_llvm/builder.rs index 250201b3224a6..b20280d133336 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, Backend, CommonMethods}; +use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods}; use syntax; use std::borrow::Cow; diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index d5e9ad9d9ea4f..8765e6dcd3be5 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -22,7 +22,7 @@ use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::CommonMethods; +use interfaces::CommonWriteMethods; use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index b27b32efeb315..9613352adb15e 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -24,7 +24,7 @@ use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{Backend, CommonMethods}; +use interfaces::{Backend, CommonMethods, CommonWriteMethods}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -203,11 +203,6 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { } impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - fn val_ty(v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } - } // LLVM constant constructors. 
fn c_null(&self, t: &'ll Type) -> &'ll Value { @@ -356,13 +351,6 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { Self::c_bytes_in_context(&self.llcx, bytes) } - fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } - } - fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { unsafe { assert_eq!(idx as c_uint as u64, idx); @@ -552,3 +540,18 @@ pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, _ => bug!("unexpected type {:?} to ty_fn_sig", ty) } } + +impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { + fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } + } + + fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } + } +} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 450c8145d37e9..50d52be61744d 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -24,7 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::CommonMethods; +use interfaces::CommonWriteMethods; use rustc::ty::layout::{Align, LayoutOf}; diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index fe587bd8f83ea..f58338f384f95 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -11,9 +11,7 @@ use super::Backend; use syntax::symbol::LocalInternedString; -pub trait CommonMethods : Backend { - fn val_ty(v: Self::Value) -> Self::Type; - +pub trait CommonMethods : Backend + CommonWriteMethods { // Constant constructors fn 
c_null(&self, t: Self::Type) -> Self::Value; fn c_undef(&self, t: Self::Type) -> Self::Value; @@ -50,7 +48,6 @@ pub trait CommonMethods : Backend { fn c_array(ty: Self::Type, elts: &[Self::Value]) -> Self::Value; fn c_vector(elts: &[Self::Value]) -> Self::Value; fn c_bytes(&self, bytes: &[u8]) -> Self::Value; - fn c_bytes_in_context(llcx: Self::Context, bytes: &[u8]) -> Self::Value; fn const_get_elt(v: Self::Value, idx: u64) -> Self::Value; fn const_get_real(v: Self::Value) -> Option<(f64, bool)>; @@ -59,3 +56,8 @@ pub trait CommonMethods : Backend { fn is_const_real(v: Self::Value) -> bool; fn const_to_opt_u128(v: Self::Value, sign_ext: bool) -> Option; } + +pub trait CommonWriteMethods : Backend { + fn val_ty(v: Self::Value) -> Self::Type; + fn c_bytes_in_context(llcx: Self::Context, bytes: &[u8]) -> Self::Value; +} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 77db6393f6c1e..e8ece54718efe 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -14,4 +14,4 @@ mod common; pub use self::builder::BuilderMethods; pub use self::backend::Backend; -pub use self::common::CommonMethods; +pub use self::common::{CommonMethods, CommonWriteMethods}; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 133fd3d22eed2..3b76d0202a790 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -27,7 +27,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; From c67788df6baa035b9aba4e4ee500479629cac8a4 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 29 Aug 2018 18:42:25 +0200 Subject: [PATCH 25/76] CommonWriteMethods are not static any more --- 
src/librustc_codegen_llvm/back/lto.rs | 2 + src/librustc_codegen_llvm/back/write.rs | 66 +++++++++++++++---- src/librustc_codegen_llvm/base.rs | 25 +++---- src/librustc_codegen_llvm/builder.rs | 16 ++--- src/librustc_codegen_llvm/callee.rs | 4 +- src/librustc_codegen_llvm/common.rs | 64 +++++++++--------- src/librustc_codegen_llvm/consts.rs | 6 +- .../interfaces/common.rs | 15 +++-- src/librustc_codegen_llvm/lib.rs | 58 ++++++++++++++-- src/librustc_codegen_llvm/mir/rvalue.rs | 8 +-- 10 files changed, 179 insertions(+), 85 deletions(-) diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 61856236a1491..5392187a1b491 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -26,6 +26,7 @@ use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; use {ModuleCodegen, ModuleLlvm, ModuleKind}; +use std::marker::PhantomData; use libc; @@ -763,6 +764,7 @@ impl ThinModule { llmod_raw, llcx, tm, + phantom: PhantomData }, name: self.name().to_string(), kind: ModuleKind::Regular, diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index ea28c0f5f0108..c073ffdc77d33 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -27,7 +27,7 @@ use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitize use rustc::session::Session; use rustc::util::nodemap::FxHashMap; use time_graph::{self, TimeGraph, Timeline}; -use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; +use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock, True}; use llvm_util; use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, CachedModuleCodegen}; @@ -45,10 +45,12 @@ use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; use type_::Type; -use context::{is_pie_binary, get_reloc_model, CodegenCx}; 
-use interfaces::CommonWriteMethods; +use context::{is_pie_binary, get_reloc_model}; +use interfaces::{Backend, CommonWriteMethods}; use jobserver::{Client, Acquired}; use rustc_demangle; +use value::Value; +use std::marker::PhantomData; use std::any::Any; use std::ffi::{CString, CStr}; @@ -347,7 +349,7 @@ struct AssemblerCommand { /// Additional resources used by optimize_and_codegen (not module specific) #[derive(Clone)] -pub struct CodegenContext { +pub struct CodegenContext<'ll> { // Resources needed when running LTO pub time_passes: bool, pub lto: Lto, @@ -389,9 +391,12 @@ pub struct CodegenContext { time_graph: Option, // The assembler command if no_integrated_as option is enabled, None otherwise assembler_cmd: Option>, + // This field is used to give a lifetime parameter to the struct so that it can implement + // the Backend trait. + phantom: PhantomData<&'ll ()> } -impl CodegenContext { +impl CodegenContext<'ll> { pub fn create_diag_handler(&self) -> Handler { Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) } @@ -419,13 +424,49 @@ impl CodegenContext { } } +impl<'ll> Backend for CodegenContext<'ll> { + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; + type Type = &'ll Type; + type Context = &'ll llvm::Context; +} + +impl CommonWriteMethods for CodegenContext<'ll> { + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } + } + + fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } + } + + fn c_struct_in_context( + &self, + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, + ) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as llvm::Bool) + } + } +} + + pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), + data: 
*mut (&'a CodegenContext<'a>, &'a Handler), llcx: &'a llvm::Context, } impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext, + pub fn new(cgcx: &'a CodegenContext<'a>, handler: &'a Handler, llcx: &'a llvm::Context) -> Self { let data = Box::into_raw(Box::new((cgcx, handler))); @@ -889,10 +930,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = CodegenCx::c_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = cgcx.c_bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - CodegenCx::val_ty(llconst), + cgcx.val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -909,10 +950,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = CodegenCx::c_bytes_in_context(llcx, &[]); + let llconst = cgcx.c_bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - CodegenCx::val_ty(llconst), + cgcx.val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -1626,6 +1667,7 @@ fn start_executing_work(tcx: TyCtxt, target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), debuginfo: tcx.sess.opts.debuginfo, assembler_cmd, + phantom: PhantomData }; // This is the "main loop" of parallel work happening for parallel codegen. 
@@ -2094,7 +2136,7 @@ pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); -fn spawn_work(cgcx: CodegenContext, work: WorkItem) { +fn spawn_work(cgcx: CodegenContext<'static>, work: WorkItem) { let depth = time_depth(); thread::spawn(move || { diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index ba8fcc78a1f20..c90cb5223198b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -335,12 +335,13 @@ pub fn coerce_unsized_into( } pub fn cast_shift_expr_rhs( - cx: &Builder<'_, 'll, '_, &'ll Value>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value + bx: &Builder<'_, 'll, '_, &'ll Value>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value ) -> &'ll Value { - cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) + cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) } -fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, +fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_, &'ll Value>, + op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value, trunc: F, @@ -351,8 +352,8 @@ fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, { // Shifts may have any size int on the rhs if op.is_shift() { - let mut rhs_llty = CodegenCx::val_ty(rhs); - let mut lhs_llty = CodegenCx::val_ty(lhs); + let mut rhs_llty = bx.cx().val_ty(rhs); + let mut lhs_llty = bx.cx().val_ty(lhs); if rhs_llty.kind() == TypeKind::Vector { rhs_llty = rhs_llty.element_type() } @@ -393,7 +394,7 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( bx: &Builder<'_ ,'ll, '_, &'ll Value>, val: &'ll Value ) -> &'ll Value { - if CodegenCx::val_ty(val) == Type::i1(bx.cx()) { + if bx.cx().val_ty(val) == Type::i1(bx.cx()) { bx.zext(val, Type::i8(bx.cx())) } else { val @@ -433,7 +434,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( if 
flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let val = bx.load(src, align); - let ptr = bx.pointercast(dst, CodegenCx::val_ty(val).ptr_to()); + let ptr = bx.pointercast(dst, bx.cx().val_ty(val).ptr_to()); bx.store_with_flags(val, ptr, align, flags); return; } @@ -652,12 +653,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = CodegenCx::c_bytes_in_context(metadata_llcx, &compressed); - let llconst = CodegenCx::c_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = llvm_module.c_bytes_in_context(metadata_llcx, &compressed); + let llconst = llvm_module.c_struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, CodegenCx::val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, llvm_module.val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); @@ -1236,7 +1237,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Run replace-all-uses-with for statics that need it for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, CodegenCx::val_ty(old_g)); + let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); llvm::LLVMReplaceAllUsesWith(old_g, bitcast); llvm::LLVMDeleteGlobal(old_g); } @@ -1251,7 +1252,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, - CodegenCx::val_ty(array), + cx.val_ty(array), name.as_ptr()); llvm::LLVMSetInitializer(g, array); llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 
b20280d133336..af479f8d89cb2 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -547,7 +547,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } unsafe { - let llty = CodegenCx::val_ty(load); + let llty = self.cx.val_ty(load); let v = [ self.cx.c_uint_big(llty, range.start), self.cx.c_uint_big(llty, range.end) @@ -781,7 +781,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let argtys = inputs.iter().map(|v| { debug!("Asm Input Type: {:?}", *v); - CodegenCx::val_ty(*v) + self.cx.val_ty(*v) }).collect::>(); debug!("Asm Output Type: {:?}", output); @@ -862,7 +862,7 @@ impl BuilderMethods<'a, 'll, 'tcx> fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { - let elt_ty = CodegenCx::val_ty(elt); + let elt_ty = self.cx.val_ty(elt); let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0)); let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); @@ -1173,8 +1173,8 @@ impl BuilderMethods<'a, 'll, 'tcx> fn check_store<'b>(&self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = CodegenCx::val_ty(ptr); - let stored_ty = CodegenCx::val_ty(val); + let dest_ptr_ty = self.cx.val_ty(ptr); + let stored_ty = self.cx.val_ty(val); let stored_ptr_ty = stored_ty.ptr_to(); assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); @@ -1194,7 +1194,7 @@ impl BuilderMethods<'a, 'll, 'tcx> typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = CodegenCx::val_ty(llfn); + let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers while fn_ty.kind() == llvm::TypeKind::Pointer { fn_ty = fn_ty.element_type(); @@ -1206,7 +1206,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let param_tys = fn_ty.func_params(); let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| CodegenCx::val_ty(v))) + .zip(args.iter().map(|&v| self.cx().val_ty(v))) .all(|(expected_ty, 
actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1217,7 +1217,7 @@ impl BuilderMethods<'a, 'll, 'tcx> .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = CodegenCx::val_ty(actual_val); + let actual_ty = self.cx().val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 8765e6dcd3be5..8d60db2bb1e9a 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -84,7 +84,7 @@ pub fn get_fn( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - if CodegenCx::val_ty(llfn) != llptrty { + if cx.val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); consts::ptrcast(llfn, llptrty) } else { @@ -93,7 +93,7 @@ pub fn get_fn( } } else { let llfn = declare::declare_fn(cx, &sym, fn_ty); - assert_eq!(CodegenCx::val_ty(llfn), llptrty); + assert_eq!(cx.val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); if instance.def.is_inline(tcx) { diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 9613352adb15e..ceb30f16cf961 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -284,7 +284,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { s.len() as c_uint, !null_terminated as Bool); let sym = &self.generate_local_symbol_name("str"); - let g = declare::define_global(&self, &sym[..], Self::val_ty(sc)).unwrap_or_else(||{ + let g = declare::define_global(&self, &sym[..], &self.val_ty(sc)).unwrap_or_else(||{ bug!("symbol `{}` is already defined", sym); }); llvm::LLVMSetInitializer(g, sc); @@ -320,19 +320,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, 
&'ll Value> { elts: &[&'ll Value], packed: bool ) -> &'ll Value { - Self::c_struct_in_context(&self.llcx, elts, packed) - } - - fn c_struct_in_context( - llcx: &'a llvm::Context, - elts: &[&'a Value], - packed: bool, - ) -> &'a Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) - } + &self.c_struct_in_context(&self.llcx, elts, packed) } fn c_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { @@ -348,7 +336,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn c_bytes(&self, bytes: &[u8]) -> &'ll Value { - Self::c_bytes_in_context(&self.llcx, bytes) + &self.c_bytes_in_context(&self.llcx, bytes) } fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { @@ -413,6 +401,35 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } +impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } + } + + fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } + } + + fn c_struct_in_context( + &self, + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, + ) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) + } + } +} + + #[inline] fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { ((hi as u128) << 64) | (lo as u128) @@ -463,7 +480,7 @@ pub fn build_unchecked_rshift( } fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, rhs: &'ll Value) -> &'ll Value { - let rhs_llty = CodegenCx::val_ty(rhs); + let rhs_llty = bx.cx().val_ty(rhs); bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) } @@ -540,18 +557,3 @@ pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, _ => bug!("unexpected type 
{:?} to ty_fn_sig", ty) } } - -impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - fn val_ty(v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } - } - - fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } - } -} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 50d52be61744d..afb62dbc6c26a 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -74,13 +74,13 @@ pub fn addr_of_mut( Some(kind) if !cx.tcx.sess.fewer_names() => { let name = cx.generate_local_symbol_name(kind); let gv = declare::define_global(cx, &name[..], - CodegenCx::val_ty(cv)).unwrap_or_else(||{ + cx.val_ty(cv)).unwrap_or_else(||{ bug!("symbol `{}` is already defined", name); }); llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); gv }, - _ => declare::define_private_global(cx, CodegenCx::val_ty(cv)), + _ => declare::define_private_global(cx, cx.val_ty(cv)), }; llvm::LLVMSetInitializer(gv, cv); set_global_alignment(cx, gv, align); @@ -312,7 +312,7 @@ pub fn codegen_static<'a, 'tcx>( // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = CodegenCx::val_ty(v); + let mut val_llty = cx.val_ty(v); let v = if val_llty == Type::i1(cx) { val_llty = Type::i8(cx); llvm::LLVMConstZExt(v, val_llty) diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index f58338f384f95..9eaf94cff6607 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -40,11 +40,6 @@ pub trait CommonMethods : Backend + CommonWriteMethods { elts: &[Self::Value], packed: bool ) -> Self::Value; - fn c_struct_in_context( - llcx: 
Self::Context, - elts: &[Self::Value], - packed: bool, - ) -> Self::Value; fn c_array(ty: Self::Type, elts: &[Self::Value]) -> Self::Value; fn c_vector(elts: &[Self::Value]) -> Self::Value; fn c_bytes(&self, bytes: &[u8]) -> Self::Value; @@ -58,6 +53,12 @@ pub trait CommonMethods : Backend + CommonWriteMethods { } pub trait CommonWriteMethods : Backend { - fn val_ty(v: Self::Value) -> Self::Type; - fn c_bytes_in_context(llcx: Self::Context, bytes: &[u8]) -> Self::Value; + fn val_ty(&self, v: Self::Value) -> Self::Type; + fn c_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value; + fn c_struct_in_context( + &self, + llcx: Self::Context, + elts: &[Self::Value], + packed: bool, + ) -> Self::Value; } diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 57b4f8dfbbcb6..5672b1e89a8b9 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -68,11 +68,16 @@ extern crate tempfile; extern crate memmap; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use interfaces::{Backend, CommonWriteMethods}; +use value::Value; +use type_::Type; pub use llvm_util::target_features; use std::any::Any; use std::path::{PathBuf}; use std::sync::mpsc; +use std::marker::PhantomData; +use libc::{c_uint, c_char}; use rustc_data_structures::sync::Lrc; use rustc::dep_graph::DepGraph; @@ -274,7 +279,7 @@ struct ModuleCodegen { /// as the crate name and disambiguator. /// We currently generate these names via CodegenUnit::build_cgu_name(). 
name: String, - module_llvm: ModuleLlvm, + module_llvm: ModuleLlvm<'static>, kind: ModuleKind, } @@ -332,16 +337,24 @@ struct CompiledModule { bytecode_compressed: Option, } -struct ModuleLlvm { +struct ModuleLlvm<'ll> { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, + phantom: PhantomData<&'ll ()> } -unsafe impl Send for ModuleLlvm { } -unsafe impl Sync for ModuleLlvm { } +impl<'ll> Backend for ModuleLlvm<'ll> { + type Value = &'ll Value; + type BasicBlock = &'ll llvm::BasicBlock; + type Type = &'ll Type; + type Context = &'ll llvm::Context; +} + +unsafe impl Send for ModuleLlvm<'ll> { } +unsafe impl Sync for ModuleLlvm<'ll> { } -impl ModuleLlvm { +impl ModuleLlvm<'ll> { fn new(sess: &Session, mod_name: &str) -> Self { unsafe { let llcx = llvm::LLVMRustContextCreate(sess.fewer_names()); @@ -351,6 +364,7 @@ impl ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(sess, false), + phantom: PhantomData } } } @@ -362,7 +376,39 @@ impl ModuleLlvm { } } -impl Drop for ModuleLlvm { +impl CommonWriteMethods for ModuleLlvm<'ll> { + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } + } + + fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext( + llcx, + ptr, + bytes.len() as c_uint, + llvm::True); + } + } + + fn c_struct_in_context( + &self, + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, + ) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as llvm::Bool) + } + } +} + +impl Drop for ModuleLlvm<'ll> { fn drop(&mut self) { unsafe { llvm::LLVMContextDispose(&mut *(self.llcx as *mut _)); diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 3b76d0202a790..02711f3a7b93c 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ 
b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -118,7 +118,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); - if CodegenCx::val_ty(v) == Type::i8(bx.cx()) { + if bx.cx().val_ty(v) == Type::i8(bx.cx()) { base::call_memset(&bx, start, v, size, align, false); return bx; } @@ -132,7 +132,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(CodegenCx::val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); @@ -705,8 +705,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.extract_value(res, 1)) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = CodegenCx::val_ty(lhs); - let rhs_llty = CodegenCx::val_ty(rhs); + let lhs_llty = bx.cx().val_ty(lhs); + let rhs_llty = bx.cx().val_ty(rhs); let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); From bfea5b2093da3c2fbb836ba2fe164141d2a51677 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 30 Aug 2018 14:24:41 +0200 Subject: [PATCH 26/76] Removed code duplication for CommonWriteMethods --- src/librustc_codegen_llvm/back/write.rs | 18 ++++------- src/librustc_codegen_llvm/common.rs | 40 +++++++++++++++++-------- src/librustc_codegen_llvm/lib.rs | 20 ++----------- 3 files changed, 36 insertions(+), 42 deletions(-) diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index c073ffdc77d33..fbbc05f0e0fa4 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -27,7 +27,7 @@ use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitize use rustc::session::Session; use 
rustc::util::nodemap::FxHashMap; use time_graph::{self, TimeGraph, Timeline}; -use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock, True}; +use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock}; use llvm_util; use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, CachedModuleCodegen}; @@ -47,6 +47,7 @@ use syntax_pos::symbol::Symbol; use type_::Type; use context::{is_pie_binary, get_reloc_model}; use interfaces::{Backend, CommonWriteMethods}; +use common; use jobserver::{Client, Acquired}; use rustc_demangle; use value::Value; @@ -433,16 +434,11 @@ impl<'ll> Backend for CodegenContext<'ll> { impl CommonWriteMethods for CodegenContext<'ll> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } + common::val_ty(v) } fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } + common::c_bytes_in_context(llcx, bytes) } fn c_struct_in_context( @@ -451,11 +447,7 @@ impl CommonWriteMethods for CodegenContext<'ll> { elts: &[&'a Value], packed: bool, ) -> &'a Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as llvm::Bool) - } + common::c_struct_in_context(llcx, elts, packed) } } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index ceb30f16cf961..6273aa85e1f01 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -401,18 +401,38 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } +pub fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } +} + +pub fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, 
bytes.len() as c_uint, True); + } +} + +pub fn c_struct_in_context( + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, +) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) + } +} + impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } + val_ty(v) } fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } + c_bytes_in_context(llcx, bytes) } fn c_struct_in_context( @@ -421,11 +441,7 @@ impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { elts: &[&'a Value], packed: bool, ) -> &'a Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) - } + c_struct_in_context(llcx, elts, packed) } } diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 5672b1e89a8b9..7e6c084f0160a 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -77,7 +77,6 @@ use std::any::Any; use std::path::{PathBuf}; use std::sync::mpsc; use std::marker::PhantomData; -use libc::{c_uint, c_char}; use rustc_data_structures::sync::Lrc; use rustc::dep_graph::DepGraph; @@ -378,20 +377,11 @@ impl ModuleLlvm<'ll> { impl CommonWriteMethods for ModuleLlvm<'ll> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } + common::val_ty(v) } fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext( - llcx, - ptr, - bytes.len() as c_uint, - llvm::True); - } + common::c_bytes_in_context(llcx, bytes) } fn c_struct_in_context( @@ -400,11 +390,7 @@ impl 
CommonWriteMethods for ModuleLlvm<'ll> { elts: &[&'a Value], packed: bool, ) -> &'a Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as llvm::Bool) - } + common::c_struct_in_context(llcx, elts, packed) } } From b588c000e91c794c1adced6160b1c89d4cb74610 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 30 Aug 2018 14:58:15 +0200 Subject: [PATCH 27/76] All CommonMethods now real methods (not static) --- src/librustc_codegen_llvm/base.rs | 2 +- src/librustc_codegen_llvm/common.rs | 20 +++++++++---------- src/librustc_codegen_llvm/glue.rs | 12 +++++------ .../interfaces/common.rs | 16 +++++++-------- src/librustc_codegen_llvm/intrinsic.rs | 8 ++++---- src/librustc_codegen_llvm/mir/block.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 3 +-- 7 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index c90cb5223198b..ae6dbf7a3165e 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -1248,7 +1248,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if !cx.used_statics.borrow().is_empty() { let name = const_cstr!("llvm.used"); let section = const_cstr!("llvm.metadata"); - let array = CodegenCx::c_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); + let array = cx.c_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 6273aa85e1f01..0e12def22068e 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -323,13 +323,13 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { &self.c_struct_in_context(&self.llcx, elts, packed) } - fn c_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + fn c_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { unsafe { 
return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); } } - fn c_vector(elts: &[&'ll Value]) -> &'ll Value { + fn c_vector(&self, elts: &[&'ll Value]) -> &'ll Value { unsafe { return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); } @@ -339,7 +339,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { &self.c_bytes_in_context(&self.llcx, bytes) } - fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { + fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { unsafe { assert_eq!(idx as c_uint as u64, idx); let us = &[idx as c_uint]; @@ -352,9 +352,9 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> { + fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { unsafe { - if Self::is_const_real(v) { + if self.is_const_real(v) { let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); let loses_info = if loses_info == 1 { true } else { false }; @@ -365,27 +365,27 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn const_to_uint(v: &'ll Value) -> u64 { + fn const_to_uint(&self, v: &'ll Value) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) } } - fn is_const_integral(v: &'ll Value) -> bool { + fn is_const_integral(&self, v: &'ll Value) -> bool { unsafe { llvm::LLVMIsAConstantInt(v).is_some() } } - fn is_const_real(v: &'ll Value) -> bool { + fn is_const_real(&self, v: &'ll Value) -> bool { unsafe { llvm::LLVMIsAConstantFP(v).is_some() } } - fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option { + fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { unsafe { - if Self::is_const_integral(v) { + if self.is_const_integral(v) { let (mut lo, mut hi) = (0u64, 0u64); let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo); diff --git a/src/librustc_codegen_llvm/glue.rs 
b/src/librustc_codegen_llvm/glue.rs index c667983e1e3dd..4d445cb1467e7 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -34,8 +34,8 @@ pub fn size_and_align_of_dst( let (size, align) = bx.cx().size_and_align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = CodegenCx::c_usize(bx.cx(), size.bytes()); - let align = CodegenCx::c_usize(bx.cx(), align.abi()); + let size = bx.cx().c_usize(size.bytes()); + let align = bx.cx().c_usize(align.abi()); return (size, align); } match t.sty { @@ -49,8 +49,8 @@ pub fn size_and_align_of_dst( // The info in this case is the length of the str, so the size is that // times the unit size. let (size, align) = bx.cx().size_and_align_of(unit); - (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx(), size.bytes())), - CodegenCx::c_usize(bx.cx(), align.abi())) + (bx.mul(info.unwrap(), bx.cx().c_usize(size.bytes())), + bx.cx().c_usize(align.abi())) } _ => { let cx = bx.cx(); @@ -93,8 +93,8 @@ pub fn size_and_align_of_dst( // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = match (CodegenCx::const_to_opt_u128(sized_align, false), - CodegenCx::const_to_opt_u128(unsized_align, false)) { + let align = match (bx.cx().const_to_opt_u128(sized_align, false), + bx.cx().const_to_opt_u128(unsized_align, false)) { (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. 
diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index 9eaf94cff6607..79fed084588d7 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -40,16 +40,16 @@ pub trait CommonMethods : Backend + CommonWriteMethods { elts: &[Self::Value], packed: bool ) -> Self::Value; - fn c_array(ty: Self::Type, elts: &[Self::Value]) -> Self::Value; - fn c_vector(elts: &[Self::Value]) -> Self::Value; + fn c_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn c_vector(&self, elts: &[Self::Value]) -> Self::Value; fn c_bytes(&self, bytes: &[u8]) -> Self::Value; - fn const_get_elt(v: Self::Value, idx: u64) -> Self::Value; - fn const_get_real(v: Self::Value) -> Option<(f64, bool)>; - fn const_to_uint(v: Self::Value) -> u64; - fn is_const_integral(v: Self::Value) -> bool; - fn is_const_real(v: Self::Value) -> bool; - fn const_to_opt_u128(v: Self::Value, sign_ext: bool) -> Option; + fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; + fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; + fn const_to_uint(&self, v: Self::Value) -> u64; + fn is_const_integral(&self, v: Self::Value) -> bool; + fn is_const_real(&self, v: Self::Value) -> bool; + fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; } pub trait CommonWriteMethods : Backend { diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index ccd1829e288a7..418e8eeffd5b3 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -1108,8 +1108,8 @@ fn generic_simd_intrinsic( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = CodegenCx::const_get_elt(vector, i as u64); - match CodegenCx::const_to_opt_u128(val, true) { + let val = bx.cx().const_get_elt(vector, i as u64); + match bx.cx().const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is 
not a constant", arg_idx); None @@ -1130,7 +1130,7 @@ fn generic_simd_intrinsic( return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - CodegenCx::c_vector(&indices))) + bx.cx().c_vector(&indices))) } if name == "simd_insert" { @@ -1543,7 +1543,7 @@ fn generic_simd_intrinsic( // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match CodegenCx::const_get_real(acc) { + match bx.cx().const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index a7e3f54c0209b..c52f83395b967 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -325,7 +325,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { let cond = self.codegen_operand(&bx, cond).immediate(); - let mut const_cond = CodegenCx::const_to_opt_u128(cond, false).map(|c| c == 1); + let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 02711f3a7b93c..712524db8209a 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -20,7 +20,6 @@ use base; use builder::Builder; use callee; use common::{self, IntPredicate, RealPredicate}; -use context::CodegenCx; use consts; use monomorphize; use type_::Type; @@ -110,7 +109,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let size = bx.cx().c_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 { + if 
bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { let fill = bx.cx().c_u8(0); base::call_memset(&bx, start, fill, size, align, false); return bx; From 012a6e5a0ed9ba44b338fbfa6ee9573a5f6d3159 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 30 Aug 2018 15:41:59 +0200 Subject: [PATCH 28/76] Use the method form for CodegenCx everywhere --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/base.rs | 22 +++++++++---------- src/librustc_codegen_llvm/builder.rs | 6 ++--- src/librustc_codegen_llvm/debuginfo/gdb.rs | 4 ++-- src/librustc_codegen_llvm/glue.rs | 9 ++++---- .../interfaces/backend.rs | 4 +++- .../interfaces/common.rs | 3 ++- src/librustc_codegen_llvm/meth.rs | 10 ++++----- src/librustc_codegen_llvm/mir/block.rs | 17 ++++++-------- src/librustc_codegen_llvm/mir/constant.rs | 8 +++---- src/librustc_codegen_llvm/mir/mod.rs | 2 +- 12 files changed, 44 insertions(+), 45 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index be769a5367bba..87312d14301ee 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { base::call_memcpy(bx, bx.pointercast(dst.llval, Type::i8p(cx)), bx.pointercast(llscratch, Type::i8p(cx)), - CodegenCx::c_usize(cx, self.layout.size.bytes()), + cx.c_usize(self.layout.size.bytes()), self.layout.align.min(scratch_align), MemFlags::empty()); diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index eb4ea9c24893a..a6407de730ede 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -111,7 +111,7 @@ pub fn codegen_inline_asm( let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = CodegenCx::c_i32(bx.cx(), ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = 
bx.cx().c_i32(ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1)); diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index ae6dbf7a3165e..6c956b3dabd78 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -158,14 +158,14 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { } } -pub fn compare_simd_types( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - lhs: &'ll Value, - rhs: &'ll Value, +pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + lhs: Builder::Value, + rhs: Builder::Value, t: Ty<'tcx>, - ret_ty: &'ll Type, + ret_ty: Builder::Type, op: hir::BinOpKind -) -> &'ll Value { +) -> Builder::Value { let signed = match t.sty { ty::Float(_) => { let cmp = bin_op_to_fcmp_predicate(op); @@ -199,7 +199,7 @@ pub fn unsized_info( let (source, target) = cx.tcx.struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - CodegenCx::c_usize(cx, len.unwrap_usize(cx.tcx)) + cx.c_usize(len.unwrap_usize(cx.tcx)) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -445,8 +445,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( let src_ptr = bx.pointercast(src, Type::i8p(cx)); let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); let size = bx.intcast(n_bytes, cx.isize_ty, false); - let align = CodegenCx::c_i32(cx, align.abi() as i32); - let volatile = CodegenCx::c_bool(cx, flags.contains(MemFlags::VOLATILE)); + let align = cx.c_i32(align.abi() as i32); + let volatile = cx.c_bool(flags.contains(MemFlags::VOLATILE)); bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -463,7 +463,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( return; } - call_memcpy(bx, dst, src, CodegenCx::c_usize(bx.cx(), size), align, flags); + call_memcpy(bx, dst, src, bx.cx().c_usize(size), 
align, flags); } pub fn call_memset( @@ -477,7 +477,7 @@ pub fn call_memset( let ptr_width = &bx.cx().sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key); - let volatile = CodegenCx::c_bool(bx.cx(), volatile); + let volatile = bx.cx().c_bool(volatile); bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index af479f8d89cb2..6d1b692d2e801 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -615,7 +615,7 @@ impl BuilderMethods<'a, 'll, 'tcx> // *always* point to a metadata value of the integer 1. // // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = CodegenCx::c_i32(self.cx, 1); + let one = self.cx.c_i32(1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -864,7 +864,7 @@ impl BuilderMethods<'a, 'll, 'tcx> unsafe { let elt_ty = self.cx.val_ty(elt); let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0)); + let vec = self.insert_element(undef, elt, self.cx.c_i32(0)); let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); self.shuffle_vector(vec, undef, self.cx.c_null(vec_i32_ty)) } @@ -1261,7 +1261,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx)); - self.call(lifetime_intrinsic, &[CodegenCx::c_u64(self.cx, size), ptr], None); + self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None); } fn call(&self, llfn: &'ll Value, args: &[&'ll Value], diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 
01cf92bbd0f7c..a95e3885d9176 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -30,7 +30,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [CodegenCx::c_i32(bx.cx(), 0), CodegenCx::c_i32(bx.cx(), 0)]; + let indices = [bx.cx().c_i32(0), bx.cx().c_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -64,7 +64,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, CodegenCx::c_bytes(cx, section_contents)); + llvm::LLVMSetInitializer(section_var, cx.c_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 4d445cb1467e7..cab3182ebcb26 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -16,7 +16,6 @@ use std; use builder::Builder; use common::*; -use context::CodegenCx; use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; @@ -66,8 +65,8 @@ pub fn size_and_align_of_dst( let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = CodegenCx::c_usize(cx, sized_size); - let sized_align = CodegenCx::c_usize(cx, sized_align); + let sized_size = cx.c_usize(sized_size); + let sized_align = 
cx.c_usize(sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -98,7 +97,7 @@ pub fn size_and_align_of_dst( (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. - CodegenCx::c_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) + cx.c_usize(std::cmp::max(sized_align, unsized_align) as u64) } _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align), sized_align, @@ -116,7 +115,7 @@ pub fn size_and_align_of_dst( // // `(size + (align-1)) & -align` - let addend = bx.sub(align, CodegenCx::c_usize(bx.cx(), 1)); + let addend = bx.sub(align, bx.cx().c_usize(1)); let size = bx.and(bx.add(size, addend), bx.neg(align)); (size, align) diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 648ae15eb3fa0..0fae2c2578b60 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -8,8 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::fmt::Debug; + pub trait Backend { - type Value; + type Value : Debug + PartialEq; type BasicBlock; type Type; type Context; diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index 79fed084588d7..19ba30e97455d 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -47,9 +47,10 @@ pub trait CommonMethods : Backend + CommonWriteMethods { fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; fn const_to_uint(&self, v: Self::Value) -> u64; + fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; + fn is_const_integral(&self, v: Self::Value) -> bool; fn is_const_real(&self, v: Self::Value) -> bool; - fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; } pub trait CommonWriteMethods : Backend { diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index fc0a7c90e67f1..4b05dcadcb89d 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -44,7 +44,7 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx()).ptr_to().ptr_to()); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( - bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]), + bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), ptr_align ); bx.nonnull_metadata(ptr); @@ -64,7 +64,7 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to()); let usize_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( - bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]), + bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), usize_align ); // Vtable loads are invariant @@ -112,11 +112,11 @@ pub fn get_vtable( // 
///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - CodegenCx::c_usize(cx, size.bytes()), - CodegenCx::c_usize(cx, align.abi()) + cx.c_usize(size.bytes()), + cx.c_usize(align.abi()) ].iter().cloned().chain(methods).collect(); - let vtable_const = CodegenCx::c_struct(cx, &components, false); + let vtable_const = cx.c_struct(&components, false); let align = cx.data_layout().pointer_align; let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index c52f83395b967..b569fcd281859 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -19,7 +19,6 @@ use base; use callee; use builder::{Builder, MemFlags}; use common::{self, IntPredicate}; -use context::CodegenCx; use consts; use meth; use monomorphize; @@ -348,7 +347,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Pass the condition through llvm.expect for branch hinting. let expect = bx.cx().get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx(), expected)], None); + let cond = bx.call(expect, &[cond, bx.cx().c_bool(expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -366,9 +365,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Get the location information. 
let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = CodegenCx::c_str_slice(bx.cx(), filename); - let line = CodegenCx::c_u32(bx.cx(), loc.line as u32); - let col = CodegenCx::c_u32(bx.cx(), loc.col.to_usize() as u32 + 1); + let filename = bx.cx().c_str_slice(filename); + let line = bx.cx().c_u32(loc.line as u32); + let col = bx.cx().c_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -379,8 +378,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = CodegenCx::c_struct(bx.cx(), - &[filename, line, col], false); + let file_line_col = bx.cx().c_struct( &[filename, line, col], false); let file_line_col = consts::addr_of(bx.cx(), file_line_col, align, @@ -391,9 +389,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = CodegenCx::c_str_slice(bx.cx(), msg_str); - let msg_file_line_col = CodegenCx::c_struct( - bx.cx(), + let msg_str = bx.cx().c_str_slice(msg_str); + let msg_file_line_col = bx.cx().c_struct( &[msg_str, filename, line, col], false ); diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 849aa94db3196..59d15ef9e0e29 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -97,7 +97,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati assert_eq!(offset as usize as u64, offset); let offset = offset as usize; if offset > next_offset { - llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset..offset])); + llvals.push(cx.c_bytes(&alloc.bytes[next_offset..offset])); } let ptr_offset = read_target_uint( 
layout.endian, @@ -115,10 +115,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati next_offset = offset + pointer_size; } if alloc.bytes.len() >= next_offset { - llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset ..])); + llvals.push(cx.c_bytes(&alloc.bytes[next_offset ..])); } - CodegenCx::c_struct(cx, &llvals, true) + cx.c_struct(&llvals, true) } pub fn codegen_static_initializer( @@ -208,7 +208,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bug!("simd shuffle field {:?}", field) } }).collect(); - let llval = CodegenCx::c_struct(bx.cx(), &values?, false); + let llval = bx.cx().c_struct(&values?, false); Ok((llval, c.ty)) }) .unwrap_or_else(|e| { diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index dbfcf7d866e4d..4ae772fd39c57 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -420,7 +420,7 @@ fn create_funclets( // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. 
let null = bx.cx().c_null(Type::i8p(bx.cx())); - let sixty_four = CodegenCx::c_i32(bx.cx(), 64); + let sixty_four = bx.cx().c_i32(64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } From 638f4ac56f16b6d38b29f232dbdad013455a22b1 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 30 Aug 2018 16:24:13 +0200 Subject: [PATCH 29/76] Small generalization of some CodegenCx methods --- src/librustc_codegen_llvm/context.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 25655ffde67c7..9475562c4928d 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -39,6 +39,7 @@ use std::cell::{Cell, RefCell}; use std::iter; use std::str; use std::sync::Arc; +use std::hash::Hash; use syntax::symbol::LocalInternedString; use abi::Abi; @@ -103,7 +104,7 @@ pub struct CodegenCx<'a, 'tcx: 'a, V> { local_gen_sym_counter: Cell, } -impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx, &'a Value> { +impl<'a, 'tcx, Value> DepGraphSafe for CodegenCx<'a, 'tcx, Value> { } pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { @@ -217,11 +218,11 @@ pub unsafe fn create_module( llmod } -impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { +impl<'a, 'tcx, Value : Eq+Hash> CodegenCx<'a, 'tcx, Value> { crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, codegen_unit: Arc>, llvm_module: &'a ::ModuleLlvm) - -> CodegenCx<'a, 'tcx, &'a Value> { + -> CodegenCx<'a, 'tcx, Value> { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these From 89cd9c07f3bdb6471feb49972e815e58c0b182c4 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 30 Aug 2018 17:50:28 +0200 Subject: [PATCH 30/76] Added definition of type trait --- src/librustc_codegen_llvm/back/write.rs | 1 + 
src/librustc_codegen_llvm/builder.rs | 1 + src/librustc_codegen_llvm/common.rs | 1 + .../interfaces/backend.rs | 3 +- src/librustc_codegen_llvm/interfaces/mod.rs | 2 + src/librustc_codegen_llvm/interfaces/type_.rs | 43 +++++++++++++++++++ src/librustc_codegen_llvm/lib.rs | 1 + 7 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 src/librustc_codegen_llvm/interfaces/type_.rs diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index fbbc05f0e0fa4..0e8139d6190a1 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -430,6 +430,7 @@ impl<'ll> Backend for CodegenContext<'ll> { type BasicBlock = &'ll BasicBlock; type Type = &'ll Type; type Context = &'ll llvm::Context; + type TypeKind = llvm::TypeKind; } impl CommonWriteMethods for CodegenContext<'ll> { diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 6d1b692d2e801..710516ed3d3ad 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -60,6 +60,7 @@ impl Backend for Builder<'a, 'll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll type_::Type; + type TypeKind = llvm::TypeKind; type Context = &'ll llvm::Context; } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 0e12def22068e..a6bbe0e74287e 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -199,6 +199,7 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll Type; + type TypeKind = llvm::TypeKind; type Context = &'ll llvm::Context; } diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 0fae2c2578b60..af3d98d1dd179 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ 
b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -13,6 +13,7 @@ use std::fmt::Debug; pub trait Backend { type Value : Debug + PartialEq; type BasicBlock; - type Type; + type Type : Debug + PartialEq; + type TypeKind; type Context; } diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index e8ece54718efe..3e9c7eb881d18 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -11,7 +11,9 @@ mod builder; mod backend; mod common; +mod type_; pub use self::builder::BuilderMethods; pub use self::backend::Backend; pub use self::common::{CommonMethods, CommonWriteMethods}; +pub use self::type_::TypeMethods; diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs new file mode 100644 index 0000000000000..2afa85accf99c --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -0,0 +1,43 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::backend::Backend; + +pub trait TypeMethods : Backend { + fn void(&self) -> Self::Type; + fn metadata(&self) -> Self::Type; + fn i1(&self) -> Self::Type; + fn i8(&self) -> Self::Type; + fn i16(&self) -> Self::Type; + fn i32(&self) -> Self::Type; + fn i64(&self) -> Self::Type; + fn i128(&self) -> Self::Type; + fn ix(&self, num_bites: u64) -> Self::Type; + fn f32(&self) -> Self::Type; + fn f64(&self) -> Self::Type; + fn bool(&self) -> Self::Type; + fn char(&self) -> Self::Type; + fn i8p(&self) -> Self::Type; + + fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn variadic_func(&self, args: &[Self::Type]) -> Self::Type; + fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type; + fn named_struct(&self, name: &str) -> Self::Type; + fn array(&self, ty: Self::Type, len: u64) -> Self::Type; + fn vector(&self, ty: Self::Type, len: u64) -> Self::Type; + fn kind(&self, ty: Self::Type) -> Self::TypeKind; + fn set_struct_body(&self, els: &[Self::Type], packed: bool); + fn ptr_to(&self, ty: Self::Type) -> Self::Type; + fn element_type(&self, ty: Self::Type) -> Self::Type; + fn vector_length(&self, ty: Self::Type) -> usize; + fn func_params(&self, ty: Self::Type) -> Vec; + fn float_width(&self, ty: Self::Type) -> usize; + fn int_width(&self, ty: Self::Type) -> usize; +} diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 7e6c084f0160a..5c006f45ae259 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -347,6 +347,7 @@ impl<'ll> Backend for ModuleLlvm<'ll> { type Value = &'ll Value; type BasicBlock = &'ll llvm::BasicBlock; type Type = &'ll Type; + type TypeKind = llvm::TypeKind; type Context = &'ll llvm::Context; } From 1d8e6dc746c4253f1abbc82ea1b8496f3b331220 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 5 Sep 2018 14:14:03 -0700 Subject: [PATCH 31/76] Traitification of type_ methods The methods are now attached to CodegenCx instead of Type --- 
src/librustc_codegen_llvm/abi.rs | 34 +- src/librustc_codegen_llvm/asm.rs | 7 +- src/librustc_codegen_llvm/back/write.rs | 10 +- src/librustc_codegen_llvm/base.rs | 38 +- src/librustc_codegen_llvm/builder.rs | 24 +- src/librustc_codegen_llvm/common.rs | 27 +- src/librustc_codegen_llvm/consts.rs | 8 +- src/librustc_codegen_llvm/context.rs | 53 +-- src/librustc_codegen_llvm/debuginfo/gdb.rs | 5 +- src/librustc_codegen_llvm/interfaces/type_.rs | 10 +- src/librustc_codegen_llvm/intrinsic.rs | 92 ++--- src/librustc_codegen_llvm/meth.rs | 12 +- src/librustc_codegen_llvm/mir/block.rs | 14 +- src/librustc_codegen_llvm/mir/constant.rs | 10 +- src/librustc_codegen_llvm/mir/mod.rs | 3 +- src/librustc_codegen_llvm/mir/operand.rs | 7 +- src/librustc_codegen_llvm/mir/place.rs | 23 +- src/librustc_codegen_llvm/mir/rvalue.rs | 55 +-- src/librustc_codegen_llvm/type_.rs | 333 ++++++++---------- src/librustc_codegen_llvm/type_of.rs | 49 +-- 20 files changed, 407 insertions(+), 407 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 87312d14301ee..217d3058e5973 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -19,7 +19,7 @@ use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; @@ -112,16 +112,16 @@ pub trait LlvmType { impl LlvmType for Reg { fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { match self.kind { - RegKind::Integer => Type::ix(cx, self.size.bits()), + RegKind::Integer => cx.ix(self.size.bits()), RegKind::Float => { match self.size.bits() { - 32 => Type::f32(cx), - 64 => Type::f64(cx), + 32 => cx.f32(), + 64 => cx.f64(), _ => bug!("unsupported float: {:?}", self) } } RegKind::Vector => { - Type::vector(Type::i8(cx), self.size.bytes()) + cx.vector(cx.i8(), 
self.size.bytes()) } } } @@ -145,7 +145,7 @@ impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array(rest_ll_unit, rest_count); + return cx.array(rest_ll_unit, rest_count); } } @@ -160,10 +160,10 @@ impl LlvmType for CastTarget { if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(Type::ix(cx, rem_bytes * 8)); + args.push(cx.ix(rem_bytes * 8)); } - Type::struct_(cx, &args, false) + cx.struct_(&args, false) } } @@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to()); + let cast_dst = bx.pointercast(dst.llval, cx.ptr_to(cast.llvm_type(cx))); bx.store(val, cast_dst, self.layout.align); } else { // The actual return type is a struct, but the ABI @@ -240,8 +240,8 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // ...and then memcpy it to the intended destination. base::call_memcpy(bx, - bx.pointercast(dst.llval, Type::i8p(cx)), - bx.pointercast(llscratch, Type::i8p(cx)), + bx.pointercast(dst.llval, cx.i8p()), + bx.pointercast(llscratch, cx.i8p()), cx.c_usize(self.layout.size.bytes()), self.layout.align.min(scratch_align), MemFlags::empty()); @@ -606,14 +606,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => Type::void(cx), + PassMode::Ignore => cx.void(), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) 
=> { - llargument_tys.push(self.ret.memory_ty(cx).ptr_to()); - Type::void(cx) + llargument_tys.push(cx.ptr_to(self.ret.memory_ty(cx))); + cx.void() } }; @@ -639,15 +639,15 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(), + PassMode::Indirect(_, None) => cx.ptr_to(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } if self.variadic { - Type::variadic_func(&llargument_tys, llreturn_ty) + cx.variadic_func(&llargument_tys, llreturn_ty) } else { - Type::func(&llargument_tys, llreturn_ty) + cx.func(&llargument_tys, llreturn_ty) } } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index a6407de730ede..6c589deab2fcc 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -10,13 +10,12 @@ use llvm; use context::CodegenCx; -use type_::Type; use type_of::LayoutLlvmExt; use builder::Builder; use value::Value; use rustc::hir; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use mir::place::PlaceRef; use mir::operand::OperandValue; @@ -76,9 +75,9 @@ pub fn codegen_inline_asm( // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => Type::void(bx.cx()), + 0 => bx.cx().void(), 1 => output_types[0], - _ => Type::struct_(bx.cx(), &output_types, false) + _ => bx.cx().struct_(&output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 0e8139d6190a1..f88d190cccdba 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -452,6 +452,14 @@ impl CommonWriteMethods for CodegenContext<'ll> { } } +impl CodegenContext<'ll> { + pub fn ptr_to(&self, ty: &'ll Type) -> &'ll Type 
{ + unsafe { + llvm::LLVMPointerType(ty, 0) + } + } +} + pub struct DiagnosticHandlers<'a> { data: *mut (&'a CodegenContext<'a>, &'a Handler), @@ -2577,7 +2585,7 @@ fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::M "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(llcx); + let i8p_ty = Type::i8p_llcx(cgcx, llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage && diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 6c956b3dabd78..274c5ea630935 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -74,7 +74,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods}; +use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods, TypeMethods}; use std::any::Any; use std::ffi::CString; @@ -235,13 +235,13 @@ pub fn unsize_thin_ptr( (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. 
})) => { assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to(); + let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to(); + let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { @@ -354,14 +354,14 @@ fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_, &'ll Value>, if op.is_shift() { let mut rhs_llty = bx.cx().val_ty(rhs); let mut lhs_llty = bx.cx().val_ty(lhs); - if rhs_llty.kind() == TypeKind::Vector { - rhs_llty = rhs_llty.element_type() + if bx.cx().kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.cx().element_type(rhs_llty) } - if lhs_llty.kind() == TypeKind::Vector { - lhs_llty = lhs_llty.element_type() + if bx.cx().kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.cx().element_type(lhs_llty) } - let rhs_sz = rhs_llty.int_width(); - let lhs_sz = lhs_llty.int_width(); + let rhs_sz = bx.cx().int_width(rhs_llty); + let lhs_sz = bx.cx().int_width(lhs_llty); if lhs_sz < rhs_sz { trunc(rhs, lhs_llty) } else if lhs_sz > rhs_sz { @@ -394,8 +394,8 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( bx: &Builder<'_ ,'ll, '_, &'ll Value>, val: &'ll Value ) -> &'ll Value { - if bx.cx().val_ty(val) == Type::i1(bx.cx()) { - bx.zext(val, Type::i8(bx.cx())) + if bx.cx().val_ty(val) == bx.cx().i1() { + bx.zext(val, bx.cx().i8()) } else { val } @@ -418,7 +418,7 @@ pub fn to_immediate_scalar( scalar: &layout::Scalar, ) -> &'ll Value { if scalar.is_bool() { - return bx.trunc(val, Type::i1(bx.cx())); + return bx.trunc(val, bx.cx().i1()); } val } @@ -434,7 +434,7 @@ pub fn call_memcpy<'a, 'll: 
'a, 'tcx: 'll>( if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let val = bx.load(src, align); - let ptr = bx.pointercast(dst, bx.cx().val_ty(val).ptr_to()); + let ptr = bx.pointercast(dst, bx.cx().ptr_to(bx.cx().val_ty(val))); bx.store_with_flags(val, ptr, align, flags); return; } @@ -442,8 +442,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( let ptr_width = &cx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = cx.get_intrinsic(&key); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); + let src_ptr = bx.pointercast(src, cx.i8p()); + let dst_ptr = bx.pointercast(dst, cx.i8p()); let size = bx.intcast(n_bytes, cx.isize_ty, false); let align = cx.c_i32(align.abi() as i32); let volatile = cx.c_bool(flags.contains(MemFlags::VOLATILE)); @@ -555,7 +555,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { use_start_lang_item: bool, ) { let llfty = - Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); + cx.func(&[cx.t_int(), cx.ptr_to(cx.i8p())], cx.t_int()); let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); // Given that `main()` has no arguments, @@ -598,7 +598,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { start_def_id, cx.tcx.intern_substs(&[main_ret_ty.into()]), ); - (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()), + (start_fn, vec![bx.pointercast(rust_main, cx.ptr_to(cx.i8p())), arg_argc, arg_argv]) } else { debug!("using user-defined start fn"); @@ -606,7 +606,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { }; let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, Type::c_int(cx), true)); + bx.ret(bx.intcast(result, cx.t_int(), true)); } } @@ -1248,7 +1248,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if 
!cx.used_statics.borrow().is_empty() { let name = const_cstr!("llvm.used"); let section = const_cstr!("llvm.metadata"); - let array = cx.c_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); + let array = cx.c_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow()); unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 710516ed3d3ad..b1c9b60d536bb 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods}; +use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods, TypeMethods}; use syntax; use std::borrow::Cow; @@ -786,7 +786,7 @@ impl BuilderMethods<'a, 'll, 'tcx> }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = type_::Type::func(&argtys[..], output); + let fty = &self.cx().func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. 
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); @@ -864,10 +864,10 @@ impl BuilderMethods<'a, 'll, 'tcx> fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); - let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, self.cx.c_i32(0)); - let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, self.cx.c_null(vec_i32_ty)) + let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.cx().c_null(vec_i32_ty)) } } @@ -1176,9 +1176,9 @@ impl BuilderMethods<'a, 'll, 'tcx> ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = stored_ty.ptr_to(); + let stored_ptr_ty = self.cx.ptr_to(stored_ty); - assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1197,14 +1197,14 @@ impl BuilderMethods<'a, 'll, 'tcx> args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers - while fn_ty.kind() == llvm::TypeKind::Pointer { - fn_ty = fn_ty.element_type(); + while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); } - assert!(fn_ty.kind() == llvm::TypeKind::Function, + assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); - let param_tys = fn_ty.func_params(); + let param_tys = self.cx.func_params(fn_ty); let all_args_match = param_tys.iter() .zip(args.iter().map(|&v| self.cx().val_ty(v))) @@ -1261,7 +1261,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = 
self.pointercast(ptr, type_::Type::i8p(self.cx)); + let ptr = self.pointercast(ptr, self.cx.i8p()); self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None); } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index a6bbe0e74287e..0dabca8b570f4 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -24,7 +24,7 @@ use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{Backend, CommonMethods, CommonWriteMethods}; +use interfaces::{Backend, CommonMethods, CommonWriteMethods, TypeMethods}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -238,19 +238,19 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn c_bool(&self, val: bool) -> &'ll Value { - &self.c_uint(Type::i1(&self), val as u64) + &self.c_uint(&self.i1(), val as u64) } fn c_i32(&self, i: i32) -> &'ll Value { - &self.c_int(Type::i32(&self), i as i64) + &self.c_int(&self.i32(), i as i64) } fn c_u32(&self, i: u32) -> &'ll Value { - &self.c_uint(Type::i32(&self), i as u64) + &self.c_uint(&self.i32(), i as u64) } fn c_u64(&self, i: u64) -> &'ll Value { - &self.c_uint(Type::i64(&self), i) + &self.c_uint(&self.i64(), i) } fn c_usize(&self, i: u64) -> &'ll Value { @@ -264,7 +264,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn c_u8(&self, i: u8) -> &'ll Value { - &self.c_uint(Type::i8(&self), i as u64) + &self.c_uint(&self.i8(), i as u64) } @@ -302,7 +302,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value { let len = s.len(); let cs = consts::ptrcast(&self.c_cstr(s, false), - &self.layout_of(&self.tcx.mk_str()).llvm_type(&self).ptr_to()); + &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self))); &self.c_fat_ptr(cs, &self.c_usize(len as u64)) } @@ -507,11 +507,11 @@ pub fn shift_mask_val( 
mask_llty: &'ll Type, invert: bool ) -> &'ll Value { - let kind = llty.kind(); + let kind = bx.cx().kind(llty); match kind { TypeKind::Integer => { // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. - let val = llty.int_width() - 1; + let val = bx.cx().int_width(llty) - 1; if invert { bx.cx.c_int(mask_llty, !val as i64) } else { @@ -519,8 +519,13 @@ pub fn shift_mask_val( } }, TypeKind::Vector => { - let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert); - bx.vector_splat(mask_llty.vector_length(), mask) + let mask = shift_mask_val( + bx, + bx.cx().element_type(llty), + bx.cx().element_type(mask_llty), + invert + ); + bx.vector_splat(bx.cx().vector_length(mask_llty), mask) }, _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), } diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index afb62dbc6c26a..d0cf751b79ef1 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -24,7 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::CommonWriteMethods; +use interfaces::{CommonWriteMethods, TypeMethods}; use rustc::ty::layout::{Align, LayoutOf}; @@ -313,8 +313,8 @@ pub fn codegen_static<'a, 'tcx>( // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected let mut val_llty = cx.val_ty(v); - let v = if val_llty == Type::i1(cx) { - val_llty = Type::i8(cx); + let v = if val_llty == cx.i1() { + val_llty = cx.i8(); llvm::LLVMConstZExt(v, val_llty) } else { v @@ -432,7 +432,7 @@ pub fn codegen_static<'a, 'tcx>( if attrs.flags.contains(CodegenFnAttrFlags::USED) { // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx)); + let cast = llvm::LLVMConstPointerCast(g, cx.i8p()); cx.used_statics.borrow_mut().push(cast); } } diff --git 
a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 9475562c4928d..cf9e2ab4c6ad8 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -23,6 +23,7 @@ use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; +use interfaces::TypeMethods; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -380,7 +381,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } else { "rust_eh_personality" }; - let fty = Type::variadic_func(&[], Type::i32(self)); + let fty = &self.variadic_func(&[], &self.i32()); declare::declare_cfn(self, name, fty) } }; @@ -488,7 +489,7 @@ fn declare_intrinsic( macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); + let f = declare::declare_cfn(cx, $name, cx.func(&[], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -496,7 +497,7 @@ fn declare_intrinsic( ); ($name:expr, fn(...) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); + let f = declare::declare_cfn(cx, $name, cx.variadic_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -504,7 +505,7 @@ fn declare_intrinsic( ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); + let f = declare::declare_cfn(cx, $name, cx.func(&[$($arg),*], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -512,28 +513,28 @@ fn declare_intrinsic( ); } macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false)) + ($($field_ty:expr),*) => (cx.struct_( &[$($field_ty),*], false)) } - let i8p = Type::i8p(cx); - let void = Type::void(cx); - let i1 = Type::i1(cx); - let t_i8 = Type::i8(cx); - let t_i16 = Type::i16(cx); - let t_i32 = Type::i32(cx); - let t_i64 = Type::i64(cx); - let t_i128 = Type::i128(cx); - let t_f32 = Type::f32(cx); - let t_f64 = Type::f64(cx); - - let t_v2f32 = Type::vector(t_f32, 2); - let t_v4f32 = Type::vector(t_f32, 4); - let t_v8f32 = Type::vector(t_f32, 8); - let t_v16f32 = Type::vector(t_f32, 16); - - let t_v2f64 = Type::vector(t_f64, 2); - let t_v4f64 = Type::vector(t_f64, 4); - let t_v8f64 = Type::vector(t_f64, 8); + let i8p = cx.i8p(); + let void = cx.void(); + let i1 = cx.i1(); + let t_i8 = cx.i8(); + let t_i16 = cx.i16(); + let t_i32 = cx.i32(); + let t_i64 = cx.i64(); + let t_i128 = cx.i128(); + let t_f32 = cx.f32(); + let t_f64 = cx.f64(); + + let t_v2f32 = cx.vector(t_f32, 2); + let t_v4f32 = cx.vector(t_f32, 4); + let t_v8f32 = cx.vector(t_f32, 8); + let t_v16f32 = cx.vector(t_f32, 16); + + let t_v2f64 = cx.vector(t_f64, 2); + let t_v4f64 = cx.vector(t_f64, 4); + let t_v8f64 = cx.vector(t_f64, 8); ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); @@ -780,8 +781,8 @@ fn declare_intrinsic( ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void); - ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void); + ifn!("llvm.dbg.declare", fn(cx.metadata(), cx.metadata()) -> void); + ifn!("llvm.dbg.value", fn(cx.metadata(), t_i64, cx.metadata()) -> void); } None diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index a95e3885d9176..e84caee924a58 100644 --- 
a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -16,9 +16,8 @@ use common::CodegenCx; use builder::Builder; use declare; use rustc::session::config::DebugInfo; -use type_::Type; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use syntax::attr; @@ -56,7 +55,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(Type::i8(cx), + let llvm_type = cx.array(cx.i8(), section_contents.len() as u64); let section_var = declare::define_global(cx, section_var_name, diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 2afa85accf99c..7a2080e1a2671 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -22,22 +22,20 @@ pub trait TypeMethods : Backend { fn ix(&self, num_bites: u64) -> Self::Type; fn f32(&self) -> Self::Type; fn f64(&self) -> Self::Type; - fn bool(&self) -> Self::Type; - fn char(&self) -> Self::Type; - fn i8p(&self) -> Self::Type; + fn x86_mmx(&self) -> Self::Type; fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; - fn variadic_func(&self, args: &[Self::Type]) -> Self::Type; + fn variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type; fn named_struct(&self, name: &str) -> Self::Type; fn array(&self, ty: Self::Type, len: u64) -> Self::Type; fn vector(&self, ty: Self::Type, len: u64) -> Self::Type; fn kind(&self, ty: Self::Type) -> Self::TypeKind; - fn set_struct_body(&self, els: &[Self::Type], packed: bool); + fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); fn ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; 
fn vector_length(&self, ty: Self::Type) -> usize; fn func_params(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; - fn int_width(&self, ty: Self::Type) -> usize; + fn int_width(&self, ty: Self::Type) -> u64; } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 418e8eeffd5b3..745d3f6a855fa 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -31,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use rustc::session::Session; use syntax_pos::Span; @@ -251,7 +251,7 @@ pub fn codegen_intrinsic_call( let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); + ptr = bx.pointercast(ptr, bx.cx().ptr_to(ty.llvm_type(cx))); } let load = bx.volatile_load(ptr); let align = if name == "unaligned_volatile_load" { @@ -336,7 +336,7 @@ pub fn codegen_intrinsic_call( args[1].immediate() ], None); let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx)); + let overflow = bx.zext(bx.extract_value(pair, 1), cx.bool()); let dest = result.project_field(bx, 0); bx.store(val, dest.llval, dest.align); @@ -472,7 +472,7 @@ pub fn codegen_intrinsic_call( failorder, weak); let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx())); + let success = bx.zext(bx.extract_value(pair, 1), bx.cx().bool()); let dest = result.project_field(bx, 0); bx.store(val, dest.llval, dest.align); @@ -562,32 +562,32 @@ pub fn codegen_intrinsic_call( ) -> Vec<&'ll Type> { use intrinsics::Type::*; match *t { - Void => vec![Type::void(cx)], + Void => vec![cx.void()], Integer(_signed, _width, llvm_width) => { - vec![Type::ix(cx, llvm_width as u64)] + 
vec![cx.ix( llvm_width as u64)] } Float(x) => { match x { - 32 => vec![Type::f32(cx)], - 64 => vec![Type::f64(cx)], + 32 => vec![cx.f32()], + 64 => vec![cx.f64()], _ => bug!() } } Pointer(ref t, ref llvm_elem, _const) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![elem.ptr_to()] + vec![cx.ptr_to(elem)] } Vector(ref t, ref llvm_elem, length) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![Type::vector(elem, length as u64)] + vec![cx.vector(elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() .map(|t| one(ty_to_type(cx, t))) .collect::>(); - vec![Type::struct_(cx, &elems, false)] + vec![cx.struct_( &elems, false)] } Aggregate(true, ref contents) => { contents.iter() @@ -626,20 +626,20 @@ pub fn codegen_intrinsic_call( } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); - vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] + vec![bx.pointercast(arg.immediate(), bx.cx().ptr_to(llvm_elem))] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); vec![ bx.bitcast(arg.immediate(), - Type::vector(llvm_elem, length as u64)) + bx.cx().vector(llvm_elem, length as u64)) ] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. 
- vec![bx.trunc(arg.immediate(), Type::ix(bx.cx(), llvm_width as u64))] + vec![bx.trunc(arg.immediate(), bx.cx().ix(llvm_width as u64))] } _ => vec![arg.immediate()], } @@ -661,7 +661,7 @@ pub fn codegen_intrinsic_call( intrinsics::IntrinsicDef::Named(name) => { let f = declare::declare_cfn(cx, name, - Type::func(&inputs, outputs)); + cx.func(&inputs, outputs)); bx.call(f, &llargs, None) } }; @@ -685,7 +685,7 @@ pub fn codegen_intrinsic_call( if !fn_ty.ret.is_ignore() { if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to()); + let ptr = bx.pointercast(result.llval, cx.ptr_to(ty.llvm_type(cx))); bx.store(llval, ptr, result.align); } else { OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) @@ -717,8 +717,8 @@ fn copy_intrinsic( let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, cx.data_layout().pointer_size.bits()); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); + let dst_ptr = bx.pointercast(dst, cx.i8p()); + let src_ptr = bx.pointercast(src, cx.i8p()); let llfn = cx.get_intrinsic(&name); bx.call(llfn, @@ -742,7 +742,7 @@ fn memset_intrinsic( let (size, align) = cx.size_and_align_of(ty); let size = cx.c_usize(size.bytes()); let align = cx.c_i32(align.abi() as i32); - let dst = bx.pointercast(dst, Type::i8p(cx)); + let dst = bx.pointercast(dst, cx.i8p()); call_memset(bx, dst, val, bx.mul(size, count), align, volatile) } @@ -757,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(bx.cx().c_null(Type::i8p(&bx.cx())), dest, ptr_align); + bx.store(cx.c_null(cx.i8p()), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { @@ -833,7 +833,7 @@ fn codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. 
- let i64p = Type::i64(cx).ptr_to(); + let i64p = cx.ptr_to(cx.i64()); let ptr_align = bx.tcx().data_layout.pointer_align; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -924,12 +924,12 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. - let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); + let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false); let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); - catch.add_clause(vals, bx.cx().c_null(Type::i8p(cx))); + catch.add_clause(vals, bx.cx().c_null(cx.i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; - catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); + catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align); catch.ret(cx.c_i32(1)); }); @@ -1072,7 +1072,7 @@ fn generic_simd_intrinsic( found `{}` with length {}", in_len, in_ty, ret_ty, out_len); - require!(llret_ty.element_type().kind() == TypeKind::Integer, + require!(bx.cx().kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer, "expected return type with integer elements, found `{}` with non-integer `{}`", ret_ty, ret_ty.simd_type(tcx)); @@ -1161,8 +1161,8 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = Type::i1(bx.cx()); - let i1xn = Type::vector(i1, m_len as u64); + let i1 = bx.cx().i1(); + let i1xn = bx.cx().vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1294,16 +1294,16 @@ fn generic_simd_intrinsic( mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? 
let mut elem_ty = match elem_ty.sty { - ty::Int(v) => Type::int_from_ty(cx, v), - ty::Uint(v) => Type::uint_from_ty(cx, v), - ty::Float(v) => Type::float_from_ty(cx, v), + ty::Int(v) => cx.int_from_ty( v), + ty::Uint(v) => cx.uint_from_ty( v), + ty::Float(v) => cx.float_from_ty( v), _ => unreachable!(), }; while no_pointers > 0 { - elem_ty = elem_ty.ptr_to(); + elem_ty = cx.ptr_to(elem_ty); no_pointers -= 1; } - Type::vector(elem_ty, vec_len as u64) + cx.vector(elem_ty, vec_len as u64) } @@ -1380,13 +1380,13 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx()); + let alignment_ty = bx.cx().i32(); let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx()); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().i1(); + let i1xn = bx.cx().vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ -1401,7 +1401,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, - Type::func(&[ + bx.cx().func(&[ llvm_pointer_vec_ty, alignment_ty, mask_ty, @@ -1480,17 +1480,17 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx()); + let alignment_ty = bx.cx().i32(); let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx()); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().i1(); + let i1xn = bx.cx().vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = Type::void(bx.cx()); + let ret_t = bx.cx().void(); // Type of the vector of pointers: let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, 
pointer_count); @@ -1503,7 +1503,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, - Type::func(&[llvm_elem_vec_ty, + bx.cx().func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1559,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => bx.cx().c_undef(Type::f32(bx.cx())), - 64 => bx.cx().c_undef(Type::f64(bx.cx())), + 32 => bx.cx().c_undef(bx.cx().f32()), + 64 => bx.cx().c_undef(bx.cx().f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1637,8 +1637,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = Type::i1(bx.cx()); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().i1(); + let i1xn = bx.cx().vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1648,7 +1648,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, Type::bool(bx.cx())) + bx.zext(r, bx.cx().bool()) } ) }, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 4b05dcadcb89d..24ec83148b05a 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -14,10 +14,9 @@ use context::CodegenCx; use builder::Builder; use consts; use monomorphize; -use type_::Type; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; @@ -41,7 +40,10 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. 
debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx()).ptr_to().ptr_to()); + let llvtable = bx.pointercast( + llvtable, + bx.cx().ptr_to(bx.cx().ptr_to(fn_ty.llvm_type(bx.cx()))) + ); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), @@ -61,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to()); + let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize())); let usize_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), @@ -96,7 +98,7 @@ pub fn get_vtable( } // Not in the cache. Build it. - let nullptr = cx.c_null(Type::i8p(cx)); + let nullptr = cx.c_null(cx.i8p()); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index b569fcd281859..82621ad5d9949 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -26,7 +26,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -268,7 +268,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; bx.load( - bx.pointercast(llslot, cast_ty.llvm_type(bx.cx()).ptr_to()), + bx.pointercast(llslot, bx.cx().ptr_to(cast_ty.llvm_type(bx.cx()))), self.fn_ty.ret.layout.align) } }; @@ -378,7 +378,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = bx.cx().c_struct( 
&[filename, line, col], false); + let file_line_col = bx.cx().c_struct(&[filename, line, col], false); let file_line_col = consts::addr_of(bx.cx(), file_line_col, align, @@ -560,7 +560,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - bx.cx().c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to()) + bx.cx().c_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx()))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -760,7 +760,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx()).ptr_to()), + llval = bx.load(bx.pointercast(llval, bx.cx().ptr_to(ty.llvm_type(bx.cx()))), align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument @@ -861,7 +861,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { fn landing_pad_type(&self) -> &'ll Type { let cx = self.cx; - Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false) + cx.struct_( &[cx.i8p(), cx.i32()], false) } fn unreachable_block(&mut self) -> &'ll BasicBlock { @@ -973,7 +973,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { dst: PlaceRef<'tcx, &'ll Value>) { let src = self.codegen_operand(bx, src); let llty = src.layout.llvm_type(bx.cx()); - let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); + let cast_ptr = bx.pointercast(dst.llval, bx.cx().ptr_to(llty)); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 59d15ef9e0e29..854821f915373 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -26,7 +26,7 @@ use type_::Type; use syntax::ast::Mutability; use 
syntax::source_map::Span; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use super::super::callee; use super::FunctionCx; @@ -41,11 +41,11 @@ pub fn scalar_to_llvm( match cv { Scalar::Bits { size: 0, .. } => { assert_eq!(0, layout.value.size(cx).bytes()); - cx.c_undef(Type::ix(cx, 0)) + cx.c_undef(cx.ix(0)) }, Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = cx.c_uint_big(Type::ix(cx, bitsize), bits); + let llval = cx.c_uint_big(cx.ix(bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { @@ -73,7 +73,7 @@ pub fn scalar_to_llvm( None => bug!("missing allocation {:?}", ptr.alloc_id), }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(cx)), + consts::bitcast(base_addr, cx.i8p()), &cx.c_usize(ptr.offset.bytes()), 1, ) }; @@ -110,7 +110,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati value: layout::Primitive::Pointer, valid_range: 0..=!0 }, - Type::i8p(cx) + cx.i8p() )); next_offset = offset + pointer_size; } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 4ae772fd39c57..d06bd48fb7fb5 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -22,7 +22,6 @@ use common::{CodegenCx, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; -use type_::Type; use value::Value; use interfaces::{BuilderMethods, CommonMethods}; @@ -419,7 +418,7 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. 
- let null = bx.cx().c_null(Type::i8p(bx.cx())); + let null = bx.cx().c_null(bx.cx().i8p()); let sixty_four = bx.cx().c_i32(64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index d9975adb2d0c4..39bc9b4a2bb0f 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -19,10 +19,9 @@ use common::CodegenCx; use builder::{Builder, MemFlags}; use value::Value; use type_of::LayoutLlvmExt; -use type_::Type; use glue; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use std::fmt; @@ -349,7 +348,7 @@ impl OperandValue<&'ll Value> { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(Type::i8(bx.cx()), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.cx().i8(), llsize, "unsized_tmp", max_align); base::call_memcpy(bx, lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. @@ -460,7 +459,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. 
let layout = bx.cx().layout_of(ty); PlaceRef::new_sized( - bx.cx().c_undef(layout.llvm_type(bx.cx()).ptr_to()), + bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index a5b5e73a5bb25..ee1ceb4d6f714 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -18,12 +18,11 @@ use builder::Builder; use common::{CodegenCx, IntPredicate}; use consts; use type_of::LayoutLlvmExt; -use type_::Type; use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -68,11 +67,11 @@ impl PlaceRef<'tcx, &'ll Value> { let base_addr = consts::addr_of(bx.cx(), init, layout.align, None); let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(bx.cx())), + consts::bitcast(base_addr, bx.cx().i8p()), &bx.cx().c_usize(offset.bytes()), 1, )}; - let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to()); + let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx()))); PlaceRef::new_sized(llval, layout, alloc.align) } @@ -160,7 +159,7 @@ impl PlaceRef<'tcx, &'ll Value> { let load = bx.load(llptr, self.align); scalar_load_metadata(load, scalar); if scalar.is_bool() { - bx.trunc(load, Type::i1(bx.cx())) + bx.trunc(load, bx.cx().i1()) } else { load } @@ -196,7 +195,7 @@ impl PlaceRef<'tcx, &'ll Value> { }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()), + llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))), llextra: if cx.type_has_metadata(field.ty) { self.llextra } else { @@ -265,7 +264,7 @@ impl PlaceRef<'tcx, &'ll Value> { debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx)); + let byte_ptr = bx.pointercast(self.llval, cx.i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected @@ -273,7 +272,7 @@ impl PlaceRef<'tcx, &'ll Value> { debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()), + llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -378,7 +377,7 @@ impl PlaceRef<'tcx, &'ll Value> { bx.sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. - let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to()); + let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8())); let fill_byte = bx.cx().c_u8(0); let (size, align) = self.layout.size_and_align(); let size = bx.cx().c_usize(size.bytes()); @@ -420,7 +419,7 @@ impl PlaceRef<'tcx, &'ll Value> { // Cast to the appropriate variant struct type. 
let variant_ty = downcast.layout.llvm_type(bx.cx()); - downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); + downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty)); downcast } @@ -481,7 +480,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // so we generate an abort let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = bx.cx().c_undef(layout.llvm_type(bx.cx()).ptr_to()); + let llval = bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -540,7 +539,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). subslice.llval = bx.pointercast(subslice.llval, - subslice.layout.llvm_type(bx.cx()).ptr_to()); + bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx()))); subslice } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 712524db8209a..a8db74357d1a6 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -26,7 +26,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods}; +use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods, TypeMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -117,7 +117,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); - if bx.cx().val_ty(v) == Type::i8(bx.cx()) { + if bx.cx().val_ty(v) == bx.cx().i8() { base::call_memset(&bx, start, v, size, align, false); return bx; } @@ -349,8 +349,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = ll_t_in.float_width(); - let dstsz = ll_t_out.float_width(); + let srcsz = 
bx.cx().float_width(ll_t_in); + let dstsz = bx.cx().float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -828,7 +828,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. - let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32; + let is_u128_to_f32 = !signed && + bx.cx().int_width(int_ty) == 128 && + bx.cx().float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. @@ -883,39 +885,48 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
- fn compute_clamp_bounds(signed: bool, int_ty: &Type) -> (u128, u128) { - let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero); - assert_eq!(rounded_min.status, Status::OK); - let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero); - assert!(rounded_max.value.is_finite()); - (rounded_min.value.to_bits(), rounded_max.value.to_bits()) - } - fn int_max(signed: bool, int_ty: &Type) -> u128 { - let shift_amount = 128 - int_ty.int_width(); + let int_max = |signed: bool, int_ty: &'ll Type| -> u128 { + let shift_amount = 128 - bx.cx().int_width(int_ty); if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } - } - fn int_min(signed: bool, int_ty: &Type) -> i128 { + }; + let int_min = |signed: bool, int_ty: &'ll Type| -> i128 { if signed { - i128::MIN >> (128 - int_ty.int_width()) + i128::MIN >> (128 - bx.cx().int_width(int_ty)) } else { 0 } - } + }; + + let compute_clamp_bounds_single = |signed: bool, int_ty: &'ll Type| -> (u128, u128) { + let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = |signed: bool, int_ty: &'ll Type| -> (u128, u128) { + let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let float_bits_to_llval = |bits| { - let bits_llval = match float_ty.float_width() { + let bits_llval = match bx.cx().float_width(float_ty) { 32 => bx.cx().c_u32(bits as u32), 64 => bx.cx().c_u64(bits as u64), n => 
bug!("unsupported float width {}", n), }; consts::bitcast(bits_llval, float_ty) }; - let (f_min, f_max) = match float_ty.float_width() { - 32 => compute_clamp_bounds::(signed, int_ty), - 64 => compute_clamp_bounds::(signed, int_ty), + let (f_min, f_max) = match bx.cx().float_width(float_ty) { + 32 => compute_clamp_bounds_single(signed, int_ty), + 64 => compute_clamp_bounds_double(signed, int_ty), n => bug!("unsupported float width {}", n), }; let f_min = float_bits_to_llval(f_min); diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 6dc38eeb0825d..50ad014bb9e1f 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -17,10 +17,12 @@ use llvm::{Bool, False, True, TypeKind}; use context::CodegenCx; use value::Value; +use interfaces::TypeMethods; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use rustc_data_structures::small_c_str::SmallCStr; +use back::write; use std::fmt; @@ -40,191 +42,85 @@ impl fmt::Debug for Type { } } -impl Type { - pub fn void( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - unsafe { - llvm::LLVMVoidTypeInContext(cx.llcx) - } - } +impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - pub fn metadata( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn void(&self) -> &'ll Type { unsafe { - llvm::LLVMRustMetadataTypeInContext(cx.llcx) + llvm::LLVMVoidTypeInContext(&self.llcx) } } - pub fn i1( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn metadata(&self) -> &'ll Type { unsafe { - llvm::LLVMInt1TypeInContext(cx.llcx) + llvm::LLVMRustMetadataTypeInContext(self.llcx) } } - pub fn i8( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn i1(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(cx.llcx) + llvm::LLVMInt1TypeInContext(&self.llcx) } } - pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + fn i8(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(llcx) + 
llvm::LLVMInt8TypeInContext(&self.llcx) } } - pub fn i16( - cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { + + fn i16(&self) -> &'ll Type { unsafe { - llvm::LLVMInt16TypeInContext(cx.llcx) + llvm::LLVMInt16TypeInContext(&self.llcx) } } - pub fn i32( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn i32(&self) -> &'ll Type { unsafe { - llvm::LLVMInt32TypeInContext(cx.llcx) + llvm::LLVMInt32TypeInContext(&self.llcx) } } - pub fn i64( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn i64(&self) -> &'ll Type { unsafe { - llvm::LLVMInt64TypeInContext(cx.llcx) + llvm::LLVMInt64TypeInContext(&self.llcx) } } - pub fn i128( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn i128(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, 128) + llvm::LLVMIntTypeInContext(&self.llcx, 128) } } // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix( - cx: &CodegenCx<'ll, '_, &'ll Value>, - num_bits: u64 - ) -> &'ll Type { + fn ix(&self, num_bits: u64) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) + llvm::LLVMIntTypeInContext(&self.llcx, num_bits as c_uint) } } - // Creates an integer type with the given number of bits, e.g. 
i24 - pub fn ix_llcx( - llcx: &llvm::Context, - num_bits: u64 - ) -> &Type { + fn f32(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + llvm::LLVMFloatTypeInContext(&self.llcx) } } - pub fn f32( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn f64(&self) -> &'ll Type { unsafe { - llvm::LLVMFloatTypeInContext(cx.llcx) + llvm::LLVMDoubleTypeInContext(&self.llcx) } } - pub fn f64( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { + fn x86_mmx(&self) -> &'ll Type { unsafe { - llvm::LLVMDoubleTypeInContext(cx.llcx) - } - } - - pub fn bool( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - Type::i8(cx) - } - - pub fn char( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - Type::i32(cx) - } - - pub fn i8p( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - Type::i8(cx).ptr_to() - } - - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to() - } - - pub fn isize( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - cx.isize_ty - } - - pub fn c_int( - cx: &CodegenCx<'ll, '_, &'ll Value> - ) -> &'ll Type { - match &cx.tcx.sess.target.target.target_c_int_width[..] 
{ - "16" => Type::i16(cx), - "32" => Type::i32(cx), - "64" => Type::i64(cx), - width => bug!("Unsupported target_c_int_width: {}", width), + llvm::LLVMX86MMXTypeInContext(&self.llcx) } } - pub fn int_from_ty( - cx: &CodegenCx<'ll, '_, &'ll Value>, - t: ast::IntTy - ) -> &'ll Type { - match t { - ast::IntTy::Isize => cx.isize_ty, - ast::IntTy::I8 => Type::i8(cx), - ast::IntTy::I16 => Type::i16(cx), - ast::IntTy::I32 => Type::i32(cx), - ast::IntTy::I64 => Type::i64(cx), - ast::IntTy::I128 => Type::i128(cx), - } - } - - pub fn uint_from_ty( - cx: &CodegenCx<'ll, '_, &'ll Value>, - t: ast::UintTy - ) -> &'ll Type { - match t { - ast::UintTy::Usize => cx.isize_ty, - ast::UintTy::U8 => Type::i8(cx), - ast::UintTy::U16 => Type::i16(cx), - ast::UintTy::U32 => Type::i32(cx), - ast::UintTy::U64 => Type::i64(cx), - ast::UintTy::U128 => Type::i128(cx), - } - } - - pub fn float_from_ty( - cx: &CodegenCx<'ll, '_, &'ll Value>, - t: ast::FloatTy - ) -> &'ll Type { - match t { - ast::FloatTy::F32 => Type::f32(cx), - ast::FloatTy::F64 => Type::f64(cx), - } - } - - pub fn func( + fn func( + &self, args: &[&'ll Type], ret: &'ll Type ) -> &'ll Type { @@ -234,7 +130,8 @@ impl Type { } } - pub fn variadic_func( + fn variadic_func( + &self, args: &[&'ll Type], ret: &'ll Type ) -> &'ll Type { @@ -244,82 +141,82 @@ impl Type { } } - pub fn struct_( - cx: &CodegenCx<'ll, '_, &'ll Value>, + fn struct_( + &self, els: &[&'ll Type], packed: bool ) -> &'ll Type { unsafe { - llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), + llvm::LLVMStructTypeInContext(&self.llcx, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn named_struct(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> &'ll Type { + fn named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); unsafe { - llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) + llvm::LLVMStructCreateNamed(&self.llcx, name.as_ptr()) } } - pub fn array(ty: &Type, len: u64) -> &Type { + fn array(&self, ty: &'ll Type, len: 
u64) -> &'ll Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - pub fn vector(ty: &Type, len: u64) -> &Type { + fn vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - pub fn kind(&self) -> TypeKind { + fn kind(&self, ty: &'ll Type) -> TypeKind { unsafe { - llvm::LLVMRustGetTypeKind(self) + llvm::LLVMRustGetTypeKind(ty) } } - pub fn set_struct_body(&'ll self, els: &[&'ll Type], packed: bool) { + fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { - llvm::LLVMStructSetBody(self, els.as_ptr(), + llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn ptr_to(&self) -> &Type { + fn ptr_to(&self, ty: &'ll Type) -> &'ll Type { unsafe { - llvm::LLVMPointerType(self, 0) + llvm::LLVMPointerType(ty, 0) } } - pub fn element_type(&self) -> &Type { + fn element_type(&self, ty: &'ll Type) -> &'ll Type { unsafe { - llvm::LLVMGetElementType(self) + llvm::LLVMGetElementType(ty) } } /// Return the number of elements in `self` if it is a LLVM vector type. - pub fn vector_length(&self) -> usize { + fn vector_length(&self, ty: &'ll Type) -> usize { unsafe { - llvm::LLVMGetVectorSize(self) as usize + llvm::LLVMGetVectorSize(ty) as usize } } - pub fn func_params(&self) -> Vec<&Type> { + fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { - let n_args = llvm::LLVMCountParamTypes(self) as usize; + let n_args = llvm::LLVMCountParamTypes(ty) as usize; let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(self, args.as_mut_ptr()); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); args.set_len(n_args); args } } - pub fn float_width(&self) -> usize { - match self.kind() { + fn float_width(&self, ty : &'ll Type) -> usize { + match &self.kind(ty) { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -329,48 +226,130 @@ impl Type { } /// Retrieve the bit width of the integer type `self`. 
- pub fn int_width(&self) -> u64 { + fn int_width(&self, ty: &'ll Type) -> u64 { + unsafe { + llvm::LLVMGetIntTypeWidth(ty) as u64 + } + } +} + +impl Type { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { unsafe { - llvm::LLVMGetIntTypeWidth(self) as u64 + llvm::LLVMInt8TypeInContext(llcx) } } - pub fn from_integer(cx: &CodegenCx<'ll, '_, &'ll Value>, i: layout::Integer) -> &'ll Type { + // Creates an integer type with the given number of bits, e.g. i24 + pub fn ix_llcx( + llcx: &llvm::Context, + num_bits: u64 + ) -> &Type { + unsafe { + llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + } + } + + pub fn i8p_llcx(cx : &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type { + cx.ptr_to(Type::i8_llcx(llcx)) + } +} + +impl CodegenCx<'ll, 'tcx, &'ll Value> { + + pub fn bool(&self) -> &'ll Type { + &self.i8() + } + + pub fn char(&self) -> &'ll Type { + &self.i32() + } + + pub fn i8p(&self) -> &'ll Type { + &self.ptr_to(&self.i8()) + } + + pub fn isize(&self) -> &'ll Type { + &self.isize_ty + } + + pub fn t_int(&self) -> &'ll Type { + match &self.sess().target.target.target_c_int_width[..] 
{ + "16" => &self.i16(), + "32" => &self.i32(), + "64" => &self.i64(), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + + pub fn int_from_ty( + &self, + t: ast::IntTy + ) -> &'ll Type { + match t { + ast::IntTy::Isize => &self.isize_ty, + ast::IntTy::I8 => &self.i8(), + ast::IntTy::I16 => &self.i16(), + ast::IntTy::I32 => &self.i32(), + ast::IntTy::I64 => &self.i64(), + ast::IntTy::I128 => &self.i128(), + } + } + + pub fn uint_from_ty( + &self, + t: ast::UintTy + ) -> &'ll Type { + match t { + ast::UintTy::Usize => &self.isize_ty, + ast::UintTy::U8 => &self.i8(), + ast::UintTy::U16 => &self.i16(), + ast::UintTy::U32 => &self.i32(), + ast::UintTy::U64 => &self.i64(), + ast::UintTy::U128 => &self.i128(), + } + } + + pub fn float_from_ty( + &self, + t: ast::FloatTy + ) -> &'ll Type { + match t { + ast::FloatTy::F32 => &self.f32(), + ast::FloatTy::F64 => &self.f64(), + } + } + + pub fn from_integer(&self, i: layout::Integer) -> &'ll Type { use rustc::ty::layout::Integer::*; match i { - I8 => Type::i8(cx), - I16 => Type::i16(cx), - I32 => Type::i32(cx), - I64 => Type::i64(cx), - I128 => Type::i128(cx), + I8 => &self.i8(), + I16 => &self.i16(), + I32 => &self.i32(), + I64 => &self.i64(), + I128 => &self.i128(), } } /// Return a LLVM type that has at most the required alignment, /// as a conservative approximation for unknown pointee types. - pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_, &'ll Value>, align: Align) -> &'ll Type { + pub fn pointee_for_abi_align(&self, align: Align) -> &'ll Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = layout::Integer::approximate_abi_align(cx, align); - Type::from_integer(cx, ity) + let ity = layout::Integer::approximate_abi_align(self, align); + &self.from_integer(ity) } /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. 
pub fn padding_filler( - cx: &CodegenCx<'ll, '_, &'ll Value>, + &self, size: Size, align: Align ) -> &'ll Type { - let unit = layout::Integer::approximate_abi_align(cx, align); + let unit = layout::Integer::approximate_abi_align(self, align); let size = size.bytes(); let unit_size = unit.size().bytes(); assert_eq!(size % unit_size, 0); - Type::array(Type::from_integer(cx, unit), size / unit_size) - } - - pub fn x86_mmx(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { - unsafe { - llvm::LLVMX86MMXTypeInContext(cx.llcx) - } + &self.array(&self.from_integer(unit), size / unit_size) } } diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index e798f4e73f7f7..04de9a6676025 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -17,6 +17,7 @@ use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; use value::Value; +use interfaces::TypeMethods; use std::fmt::Write; @@ -38,14 +39,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, (cx.sess().target.target.arch == "x86" || cx.sess().target.target.arch == "x86_64"); if use_x86_mmx { - return Type::x86_mmx(cx) + return cx.x86_mmx() } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector(element, count); + return cx.vector(element, count); } } layout::Abi::ScalarPair(..) 
=> { - return Type::struct_(cx, &[ + return cx.struct_( &[ layout.scalar_pair_element_llvm_type(cx, 0, false), layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); @@ -80,30 +81,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = Type::padding_filler(cx, layout.size, layout.align); + let fill = cx.padding_filler( layout.size, layout.align); let packed = false; match name { None => { - Type::struct_(cx, &[fill], packed) + cx.struct_( &[fill], packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], packed); + let llty = cx.named_struct( name); + cx.set_struct_body(llty, &[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(layout.field(cx, 0).llvm_type(cx), count) + cx.array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { None => { let (llfields, packed) = struct_llfields(cx, layout); - Type::struct_(cx, &llfields, packed) + cx.struct_( &llfields, packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = cx.named_struct( name); *defer = Some((llty, layout)); llty } @@ -137,7 +138,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); assert_eq!(offset.abi_align(padding_align) + padding, target_offset); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); result.push(field.llvm_type(cx)); @@ -154,7 +155,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, assert_eq!(offset.abi_align(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); - 
result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -256,17 +257,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.layout_of(ty).llvm_type(cx).ptr_to() + cx.ptr_to(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to() + cx.ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() + cx.ptr_to(FnType::new(cx, sig, &[]).llvm_type(cx)) } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; @@ -308,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { if let Some((llty, layout)) = defer { let (llfields, packed) = struct_llfields(cx, layout); - llty.set_struct_body(&llfields, packed) + cx.set_struct_body(llty, &llfields, packed) } llty @@ -317,7 +318,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { - return Type::i1(cx); + return cx.i1(); } } self.llvm_type(cx) @@ -326,17 +327,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { - layout::Int(i, _) => Type::from_integer(cx, i), - layout::Float(FloatTy::F32) => Type::f32(cx), - layout::Float(FloatTy::F64) => Type::f64(cx), + layout::Int(i, _) => cx.from_integer( i), + layout::Float(FloatTy::F32) => cx.f32(), + layout::Float(FloatTy::F64) => cx.f64(), layout::Pointer => { // If we 
know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - Type::pointee_for_abi_align(cx, pointee.align) + cx.pointee_for_abi_align( pointee.align) } else { - Type::i8(cx) + cx.i8() }; - pointee.ptr_to() + cx.ptr_to(pointee) } } } @@ -370,7 +371,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. if immediate && scalar.is_bool() { - return Type::i1(cx); + return cx.i1(); } let offset = if index == 0 { From 316069007ad88aa77302956d5ad2bffddd19c742 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 10:23:42 -0700 Subject: [PATCH 32/76] Work around to fix issue https://github.com/rust-lang/rust/issues/53912 --- src/librustc_codegen_llvm/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 5c006f45ae259..cfe64be48203a 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -126,7 +126,10 @@ mod debuginfo; mod declare; mod glue; mod intrinsic; -pub mod llvm; + +// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. 
+#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; } + mod llvm_util; mod metadata; mod meth; From f8bdd1f65992407b37a34185643c925956433eca Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 11:57:42 -0700 Subject: [PATCH 33/76] Prefixed const methods with "const" instead of "c" --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/back/write.rs | 12 +-- src/librustc_codegen_llvm/base.rs | 16 ++-- src/librustc_codegen_llvm/builder.rs | 12 +-- src/librustc_codegen_llvm/common.rs | 74 +++++++++---------- src/librustc_codegen_llvm/debuginfo/gdb.rs | 4 +- src/librustc_codegen_llvm/glue.rs | 16 ++-- .../interfaces/common.rs | 40 +++++----- src/librustc_codegen_llvm/intrinsic.rs | 72 +++++++++--------- src/librustc_codegen_llvm/lib.rs | 8 +- src/librustc_codegen_llvm/meth.rs | 12 +-- src/librustc_codegen_llvm/mir/block.rs | 24 +++--- src/librustc_codegen_llvm/mir/constant.rs | 16 ++-- src/librustc_codegen_llvm/mir/mod.rs | 4 +- src/librustc_codegen_llvm/mir/operand.rs | 8 +- src/librustc_codegen_llvm/mir/place.rs | 50 ++++++------- src/librustc_codegen_llvm/mir/rvalue.rs | 46 ++++++------ 18 files changed, 209 insertions(+), 209 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 217d3058e5973..b7b9cddcb0bfc 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { base::call_memcpy(bx, bx.pointercast(dst.llval, cx.i8p()), bx.pointercast(llscratch, cx.i8p()), - cx.c_usize(self.layout.size.bytes()), + cx.const_usize(self.layout.size.bytes()), self.layout.align.min(scratch_align), MemFlags::empty()); diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 6c589deab2fcc..3b585b807f799 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -110,7 +110,7 @@ pub fn 
codegen_inline_asm( let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = bx.cx().c_i32(ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1)); diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index f88d190cccdba..c6b81af59524c 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -438,17 +438,17 @@ impl CommonWriteMethods for CodegenContext<'ll> { common::val_ty(v) } - fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - common::c_bytes_in_context(llcx, bytes) + fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + common::const_bytes_in_context(llcx, bytes) } - fn c_struct_in_context( + fn const_struct_in_context( &self, llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool, ) -> &'a Value { - common::c_struct_in_context(llcx, elts, packed) + common::const_struct_in_context(llcx, elts, packed) } } @@ -931,7 +931,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = cgcx.c_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = cgcx.const_bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, cgcx.val_ty(llconst), @@ -951,7 +951,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = cgcx.c_bytes_in_context(llcx, &[]); + let llconst = cgcx.const_bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, cgcx.val_ty(llconst), diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 
274c5ea630935..43dc280049e68 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -199,7 +199,7 @@ pub fn unsized_info( let (source, target) = cx.tcx.struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - cx.c_usize(len.unwrap_usize(cx.tcx)) + cx.const_usize(len.unwrap_usize(cx.tcx)) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -445,8 +445,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( let src_ptr = bx.pointercast(src, cx.i8p()); let dst_ptr = bx.pointercast(dst, cx.i8p()); let size = bx.intcast(n_bytes, cx.isize_ty, false); - let align = cx.c_i32(align.abi() as i32); - let volatile = cx.c_bool(flags.contains(MemFlags::VOLATILE)); + let align = cx.const_i32(align.abi() as i32); + let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -463,7 +463,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( return; } - call_memcpy(bx, dst, src, bx.cx().c_usize(size), align, flags); + call_memcpy(bx, dst, src, bx.cx().const_usize(size), align, flags); } pub fn call_memset( @@ -477,7 +477,7 @@ pub fn call_memset( let ptr_width = &bx.cx().sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key); - let volatile = bx.cx().c_bool(volatile); + let volatile = bx.cx().const_bool(volatile); bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } @@ -653,8 +653,8 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = llvm_module.c_bytes_in_context(metadata_llcx, &compressed); - let llconst = llvm_module.c_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = 
llvm_module.const_bytes_in_context(metadata_llcx, &compressed); + let llconst = llvm_module.const_struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { @@ -1248,7 +1248,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if !cx.used_statics.borrow().is_empty() { let name = const_cstr!("llvm.used"); let section = const_cstr!("llvm.metadata"); - let array = cx.c_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow()); + let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow()); unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index b1c9b60d536bb..c0421a565509c 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -550,8 +550,8 @@ impl BuilderMethods<'a, 'll, 'tcx> unsafe { let llty = self.cx.val_ty(load); let v = [ - self.cx.c_uint_big(llty, range.start), - self.cx.c_uint_big(llty, range.end) + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -616,7 +616,7 @@ impl BuilderMethods<'a, 'll, 'tcx> // *always* point to a metadata value of the integer 1. 
// // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = self.cx.c_i32(1); + let one = self.cx.const_i32(1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -865,9 +865,9 @@ impl BuilderMethods<'a, 'll, 'tcx> unsafe { let elt_ty = self.cx.val_ty(elt); let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, self.cx.c_i32(0)); + let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64); - self.shuffle_vector(vec, undef, self.cx().c_null(vec_i32_ty)) + self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty)) } } @@ -1262,7 +1262,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); let ptr = self.pointercast(ptr, self.cx.i8p()); - self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None); + self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } fn call(&self, llfn: &'ll Value, args: &[&'ll Value], diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 0dabca8b570f4..d09b892ebf366 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -206,71 +206,71 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { // LLVM constant constructors. 
- fn c_null(&self, t: &'ll Type) -> &'ll Value { + fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } - fn c_undef(&self, t: &'ll Type) -> &'ll Value { + fn const_undef(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMGetUndef(t) } } - fn c_int(&self, t: &'ll Type, i: i64) -> &'ll Value { + fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { unsafe { llvm::LLVMConstInt(t, i as u64, True) } } - fn c_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { + fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { unsafe { llvm::LLVMConstInt(t, i, False) } } - fn c_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { + fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { unsafe { let words = [u as u64, (u >> 64) as u64]; llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) } } - fn c_bool(&self, val: bool) -> &'ll Value { - &self.c_uint(&self.i1(), val as u64) + fn const_bool(&self, val: bool) -> &'ll Value { + &self.const_uint(&self.i1(), val as u64) } - fn c_i32(&self, i: i32) -> &'ll Value { - &self.c_int(&self.i32(), i as i64) + fn const_i32(&self, i: i32) -> &'ll Value { + &self.const_int(&self.i32(), i as i64) } - fn c_u32(&self, i: u32) -> &'ll Value { - &self.c_uint(&self.i32(), i as u64) + fn const_u32(&self, i: u32) -> &'ll Value { + &self.const_uint(&self.i32(), i as u64) } - fn c_u64(&self, i: u64) -> &'ll Value { - &self.c_uint(&self.i64(), i) + fn const_u64(&self, i: u64) -> &'ll Value { + &self.const_uint(&self.i64(), i) } - fn c_usize(&self, i: u64) -> &'ll Value { + fn const_usize(&self, i: u64) -> &'ll Value { let bit_size = self.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1< &'ll Value { - &self.c_uint(&self.i8(), i as u64) + fn const_u8(&self, i: u8) -> &'ll Value { + &self.const_uint(&self.i8(), i as u64) } // This is a 'c-like' raw string, which differs from // our boxed-and-length-annotated strings. 
- fn c_cstr( + fn const_cstr( &self, s: LocalInternedString, null_terminated: bool, @@ -299,45 +299,45 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { // NB: Do not use `do_spill_noroot` to make this into a constant string, or // you will be kicked off fast isel. See issue #4352 for an example of this. - fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value { + fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { let len = s.len(); - let cs = consts::ptrcast(&self.c_cstr(s, false), + let cs = consts::ptrcast(&self.const_cstr(s, false), &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self))); - &self.c_fat_ptr(cs, &self.c_usize(len as u64)) + &self.const_fat_ptr(cs, &self.const_usize(len as u64)) } - fn c_fat_ptr( + fn const_fat_ptr( &self, ptr: &'ll Value, meta: &'ll Value ) -> &'ll Value { assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); - &self.c_struct(&[ptr, meta], false) + &self.const_struct(&[ptr, meta], false) } - fn c_struct( + fn const_struct( &self, elts: &[&'ll Value], packed: bool ) -> &'ll Value { - &self.c_struct_in_context(&self.llcx, elts, packed) + &self.const_struct_in_context(&self.llcx, elts, packed) } - fn c_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { unsafe { return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); } } - fn c_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { unsafe { return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); } } - fn c_bytes(&self, bytes: &[u8]) -> &'ll Value { - &self.c_bytes_in_context(&self.llcx, bytes) + fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + &self.const_bytes_in_context(&self.llcx, bytes) } fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { @@ -408,14 +408,14 @@ pub fn val_ty(v: &'ll Value) -> &'ll Type { } } -pub fn 
c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { +pub fn const_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { let ptr = bytes.as_ptr() as *const c_char; return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); } } -pub fn c_struct_in_context( +pub fn const_struct_in_context( llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool, @@ -432,17 +432,17 @@ impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { val_ty(v) } - fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - c_bytes_in_context(llcx, bytes) + fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + const_bytes_in_context(llcx, bytes) } - fn c_struct_in_context( + fn const_struct_in_context( &self, llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool, ) -> &'a Value { - c_struct_in_context(llcx, elts, packed) + const_struct_in_context(llcx, elts, packed) } } @@ -513,9 +513,9 @@ pub fn shift_mask_val( // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. let val = bx.cx().int_width(llty) - 1; if invert { - bx.cx.c_int(mask_llty, !val as i64) + bx.cx.const_int(mask_llty, !val as i64) } else { - bx.cx.c_uint(mask_llty, val) + bx.cx.const_uint(mask_llty, val) } }, TypeKind::Vector => { diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index e84caee924a58..ccfd7d8920eb8 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -29,7 +29,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. 
- let indices = [bx.cx().c_i32(0), bx.cx().c_i32(0)]; + let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -63,7 +63,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, cx.c_bytes(section_contents)); + llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index cab3182ebcb26..5d14857cb3f6c 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -33,8 +33,8 @@ pub fn size_and_align_of_dst( let (size, align) = bx.cx().size_and_align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = bx.cx().c_usize(size.bytes()); - let align = bx.cx().c_usize(align.abi()); + let size = bx.cx().const_usize(size.bytes()); + let align = bx.cx().const_usize(align.abi()); return (size, align); } match t.sty { @@ -48,8 +48,8 @@ pub fn size_and_align_of_dst( // The info in this case is the length of the str, so the size is that // times the unit size. 
let (size, align) = bx.cx().size_and_align_of(unit); - (bx.mul(info.unwrap(), bx.cx().c_usize(size.bytes())), - bx.cx().c_usize(align.abi())) + (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())), + bx.cx().const_usize(align.abi())) } _ => { let cx = bx.cx(); @@ -65,8 +65,8 @@ pub fn size_and_align_of_dst( let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = cx.c_usize(sized_size); - let sized_align = cx.c_usize(sized_align); + let sized_size = cx.const_usize(sized_size); + let sized_align = cx.const_usize(sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -97,7 +97,7 @@ pub fn size_and_align_of_dst( (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. - cx.c_usize(std::cmp::max(sized_align, unsized_align) as u64) + cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64) } _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align), sized_align, @@ -115,7 +115,7 @@ pub fn size_and_align_of_dst( // // `(size + (align-1)) & -align` - let addend = bx.sub(align, bx.cx().c_usize(1)); + let addend = bx.sub(align, bx.cx().const_usize(1)); let size = bx.and(bx.add(size, addend), bx.neg(align)); (size, align) diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/common.rs index 19ba30e97455d..216a5d9665fc3 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ b/src/librustc_codegen_llvm/interfaces/common.rs @@ -13,36 +13,36 @@ use syntax::symbol::LocalInternedString; pub trait CommonMethods : Backend + CommonWriteMethods { // Constant constructors - fn c_null(&self, t: Self::Type) -> Self::Value; - fn c_undef(&self, t: Self::Type) -> Self::Value; - fn c_int(&self, t: Self::Type, i: i64) -> Self::Value; - fn c_uint(&self, t: Self::Type, i: 
u64) -> Self::Value; - fn c_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; - fn c_bool(&self, val: bool) -> Self::Value; - fn c_i32(&self, i: i32) -> Self::Value; - fn c_u32(&self, i: u32) -> Self::Value; - fn c_u64(&self, i: u64) -> Self::Value; - fn c_usize(&self, i: u64) -> Self::Value; - fn c_u8(&self, i: u8) -> Self::Value; - fn c_cstr( + fn const_null(&self, t: Self::Type) -> Self::Value; + fn const_undef(&self, t: Self::Type) -> Self::Value; + fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; + fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value; + fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; + fn const_bool(&self, val: bool) -> Self::Value; + fn const_i32(&self, i: i32) -> Self::Value; + fn const_u32(&self, i: u32) -> Self::Value; + fn const_u64(&self, i: u64) -> Self::Value; + fn const_usize(&self, i: u64) -> Self::Value; + fn const_u8(&self, i: u8) -> Self::Value; + fn const_cstr( &self, s: LocalInternedString, null_terminated: bool, ) -> Self::Value; - fn c_str_slice(&self, s: LocalInternedString) -> Self::Value; - fn c_fat_ptr( + fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; + fn const_fat_ptr( &self, ptr: Self::Value, meta: Self::Value ) -> Self::Value; - fn c_struct( + fn const_struct( &self, elts: &[Self::Value], packed: bool ) -> Self::Value; - fn c_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; - fn c_vector(&self, elts: &[Self::Value]) -> Self::Value; - fn c_bytes(&self, bytes: &[u8]) -> Self::Value; + fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; + fn const_bytes(&self, bytes: &[u8]) -> Self::Value; fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; @@ -55,8 +55,8 @@ pub trait CommonMethods : Backend + CommonWriteMethods { pub trait CommonWriteMethods : Backend { fn val_ty(&self, v: 
Self::Value) -> Self::Type; - fn c_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value; - fn c_struct_in_context( + fn const_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value; + fn const_struct_in_context( &self, llcx: Self::Context, elts: &[Self::Value], diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 745d3f6a855fa..0c4f09672f111 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -126,11 +126,11 @@ pub fn codegen_intrinsic_call( }, "likely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(true)], None) + bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(true)], None) } "unlikely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(false)], None) + bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None) } "try" => { try_intrinsic(bx, cx, @@ -146,7 +146,7 @@ pub fn codegen_intrinsic_call( } "size_of" => { let tp_ty = substs.type_at(0); - cx.c_usize(cx.size_of(tp_ty).bytes()) + cx.const_usize(cx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); @@ -155,12 +155,12 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); llsize } else { - cx.c_usize(cx.size_of(tp_ty).bytes()) + cx.const_usize(cx.size_of(tp_ty).bytes()) } } "min_align_of" => { let tp_ty = substs.type_at(0); - cx.c_usize(cx.align_of(tp_ty).abi()) + cx.const_usize(cx.align_of(tp_ty).abi()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -169,20 +169,20 @@ pub fn codegen_intrinsic_call( glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); llalign } else { - cx.c_usize(cx.align_of(tp_ty).abi()) + cx.const_usize(cx.align_of(tp_ty).abi()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - cx.c_usize(cx.align_of(tp_ty).pref()) + 
cx.const_usize(cx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - cx.c_str_slice(ty_name) + cx.const_str_slice(ty_name) } "type_id" => { - cx.c_u64(cx.tcx.type_id_hash(substs.type_at(0))) + cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0))) } "init" => { let ty = substs.type_at(0); @@ -196,8 +196,8 @@ pub fn codegen_intrinsic_call( false, ty, llresult, - cx.c_u8(0), - cx.c_usize(1) + cx.const_u8(0), + cx.const_usize(1) ); } return; @@ -209,7 +209,7 @@ pub fn codegen_intrinsic_call( "needs_drop" => { let tp_ty = substs.type_at(0); - cx.c_bool(bx.cx().type_needs_drop(tp_ty)) + cx.const_bool(bx.cx().type_needs_drop(tp_ty)) } "offset" => { let ptr = args[0].immediate(); @@ -286,9 +286,9 @@ pub fn codegen_intrinsic_call( }; bx.call(expect, &[ args[0].immediate(), - cx.c_i32(rw), + cx.const_i32(rw), args[1].immediate(), - cx.c_i32(cache_type) + cx.const_i32(cache_type) ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | @@ -300,12 +300,12 @@ pub fn codegen_intrinsic_call( Some((width, signed)) => match name { "ctlz" | "cttz" => { - let y = cx.c_bool(false); + let y = cx.const_bool(false); let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); bx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { - let y = cx.c_bool(true); + let y = cx.const_bool(true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = cx.get_intrinsic(llvm_name); bx.call(llfn, &[args[0].immediate(), y], None) @@ -705,8 +705,8 @@ fn copy_intrinsic( ) -> &'ll Value { let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = cx.c_usize(size.bytes()); - let align = cx.c_i32(align.abi() as i32); + let size = cx.const_usize(size.bytes()); + let align = cx.const_i32(align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -726,7 +726,7 @@ fn copy_intrinsic( src_ptr, bx.mul(size, count), align, - 
cx.c_bool(volatile)], + cx.const_bool(volatile)], None) } @@ -740,8 +740,8 @@ fn memset_intrinsic( ) -> &'ll Value { let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = cx.c_usize(size.bytes()); - let align = cx.c_i32(align.abi() as i32); + let size = cx.const_usize(size.bytes()); + let align = cx.const_i32(align.abi() as i32); let dst = bx.pointercast(dst, cx.i8p()); call_memset(bx, dst, val, bx.mul(size, count), align, volatile) } @@ -757,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(cx.c_null(cx.i8p()), dest, ptr_align); + bx.store(cx.const_null(cx.i8p()), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { @@ -838,7 +838,7 @@ fn codegen_msvc_try( let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(cx.c_i32(0)); + normal.ret(cx.const_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); @@ -848,19 +848,19 @@ fn codegen_msvc_try( Some(did) => ::consts::get_static(cx, did), None => bug!("msvc_try_filter not defined"), }; - let tok = catchpad.catch_pad(cs, &[tydesc, cx.c_i32(0), slot]); + let tok = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); - let val1 = cx.c_i32(1); + let val1 = cx.const_i32(1); let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); catchpad.store(arg1, local_ptr, i64_align); catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); catchpad.catch_ret(tok, caught.llbb()); - caught.ret(cx.c_i32(1)); + caught.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition 
this function @@ -916,7 +916,7 @@ fn codegen_gnu_try( let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(cx.c_i32(0)); + then.ret(cx.const_i32(0)); // Type indicator for the exception being thrown. // @@ -926,11 +926,11 @@ fn codegen_gnu_try( // rust_try ignores the selector. let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false); let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); - catch.add_clause(vals, bx.cx().c_null(cx.i8p())); + catch.add_clause(vals, bx.cx().const_null(cx.i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align); - catch.ret(cx.c_i32(1)); + catch.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -1119,18 +1119,18 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(bx.cx().c_i32(idx as i32)), + Some(idx) => Some(bx.cx().const_i32(idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(bx.cx().c_null(llret_ty)) + None => return Ok(bx.cx().const_null(llret_ty)) }; return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - bx.cx().c_vector(&indices))) + bx.cx().const_vector(&indices))) } if name == "simd_insert" { @@ -1381,7 +1381,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = bx.cx().i32(); - let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1481,7 +1481,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = bx.cx().i32(); - let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32); + let 
alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1559,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => bx.cx().c_undef(bx.cx().f32()), - 64 => bx.cx().c_undef(bx.cx().f64()), + 32 => bx.cx().const_undef(bx.cx().f32()), + 64 => bx.cx().const_undef(bx.cx().f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index cfe64be48203a..4655a49234947 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -384,17 +384,17 @@ impl CommonWriteMethods for ModuleLlvm<'ll> { common::val_ty(v) } - fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - common::c_bytes_in_context(llcx, bytes) + fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + common::const_bytes_in_context(llcx, bytes) } - fn c_struct_in_context( + fn const_struct_in_context( &self, llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool, ) -> &'a Value { - common::c_struct_in_context(llcx, elts, packed) + common::const_struct_in_context(llcx, elts, packed) } } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 24ec83148b05a..42e8b17d6ad80 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -46,7 +46,7 @@ impl<'a, 'tcx> VirtualIndex { ); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( - bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), + bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]), ptr_align ); bx.nonnull_metadata(ptr); @@ -66,7 +66,7 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize())); let usize_align = bx.tcx().data_layout.pointer_align; let ptr = 
bx.load( - bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]), + bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]), usize_align ); // Vtable loads are invariant @@ -98,7 +98,7 @@ pub fn get_vtable( } // Not in the cache. Build it. - let nullptr = cx.c_null(cx.i8p()); + let nullptr = cx.const_null(cx.i8p()); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { @@ -114,11 +114,11 @@ pub fn get_vtable( // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - cx.c_usize(size.bytes()), - cx.c_usize(align.abi()) + cx.const_usize(size.bytes()), + cx.const_usize(align.abi()) ].iter().cloned().chain(methods).collect(); - let vtable_const = cx.c_struct(&components, false); + let vtable_const = cx.const_struct(&components, false); let align = cx.data_layout().pointer_align; let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 82621ad5d9949..90507d4cdaa50 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -171,7 +171,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { slot.storage_dead(&bx); if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = bx.cx().c_undef(self.landing_pad_type()); + let mut lp = bx.cx().const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); @@ -209,7 +209,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } else { let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); - let llval = bx.cx().c_uint_big(switch_llty, values[0]); + let llval = bx.cx().const_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, 
llfalse); } @@ -220,7 +220,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { values.len()); let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); for (&value, target) in values.iter().zip(targets) { - let llval =bx.cx().c_uint_big(switch_llty, value); + let llval =bx.cx().const_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -347,7 +347,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Pass the condition through llvm.expect for branch hinting. let expect = bx.cx().get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, bx.cx().c_bool(expected)], None); + let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -365,9 +365,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Get the location information. let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.cx().c_str_slice(filename); - let line = bx.cx().c_u32(loc.line as u32); - let col = bx.cx().c_u32(loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -378,7 +378,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = bx.cx().c_struct(&[filename, line, col], false); + let file_line_col = bx.cx().const_struct(&[filename, line, col], false); let file_line_col = consts::addr_of(bx.cx(), file_line_col, align, @@ -389,8 +389,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { _ => { let str = msg.description(); let msg_str = 
Symbol::intern(str).as_str(); - let msg_str = bx.cx().c_str_slice(msg_str); - let msg_file_line_col = bx.cx().c_struct( + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( &[msg_str, filename, line, col], false ); @@ -560,7 +560,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - bx.cx().c_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx()))) + bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx()))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -701,7 +701,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { arg: &ArgType<'tcx, Ty<'tcx>>) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(bx.cx().c_undef(ty.llvm_type(bx.cx()))); + llargs.push(bx.cx().const_undef(ty.llvm_type(bx.cx()))); } if arg.is_ignore() { diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 854821f915373..fe1d71d2d5b55 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -41,11 +41,11 @@ pub fn scalar_to_llvm( match cv { Scalar::Bits { size: 0, .. 
} => { assert_eq!(0, layout.value.size(cx).bytes()); - cx.c_undef(cx.ix(0)) + cx.const_undef(cx.ix(0)) }, Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = cx.c_uint_big(cx.ix(bitsize), bits); + let llval = cx.const_uint_big(cx.ix(bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { @@ -74,7 +74,7 @@ pub fn scalar_to_llvm( }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( consts::bitcast(base_addr, cx.i8p()), - &cx.c_usize(ptr.offset.bytes()), + &cx.const_usize(ptr.offset.bytes()), 1, ) }; if layout.value != layout::Pointer { @@ -97,7 +97,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati assert_eq!(offset as usize as u64, offset); let offset = offset as usize; if offset > next_offset { - llvals.push(cx.c_bytes(&alloc.bytes[next_offset..offset])); + llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset])); } let ptr_offset = read_target_uint( layout.endian, @@ -115,10 +115,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati next_offset = offset + pointer_size; } if alloc.bytes.len() >= next_offset { - llvals.push(cx.c_bytes(&alloc.bytes[next_offset ..])); + llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..])); } - cx.c_struct(&llvals, true) + cx.const_struct(&llvals, true) } pub fn codegen_static_initializer( @@ -208,7 +208,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bug!("simd shuffle field {:?}", field) } }).collect(); - let llval = bx.cx().c_struct(&values?, false); + let llval = bx.cx().const_struct(&values?, false); Ok((llval, c.ty)) }) .unwrap_or_else(|e| { @@ -219,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. 
let ty = self.monomorphize(&ty); let llty = bx.cx().layout_of(ty).llvm_type(bx.cx()); - (bx.cx().c_undef(llty), ty) + (bx.cx().const_undef(llty), ty) }) } } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index d06bd48fb7fb5..40f8de6b81355 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -418,8 +418,8 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = bx.cx().c_null(bx.cx().i8p()); - let sixty_four = bx.cx().c_i32(64); + let null = bx.cx().const_null(bx.cx().i8p()); + let sixty_four = bx.cx().const_i32(64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 39bc9b4a2bb0f..75b2f7faf7191 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -73,7 +73,7 @@ impl OperandRef<'tcx, &'ll Value> { layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(cx.c_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.const_undef(layout.immediate_llvm_type(cx))), layout } } @@ -167,7 +167,7 @@ impl OperandRef<'tcx, &'ll Value> { debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = bx.cx().c_undef(llty); + let mut llpair = bx.cx().const_undef(llty); llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); llpair @@ -231,7 +231,7 @@ impl OperandRef<'tcx, &'ll Value> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. 
}) => { OperandValue::Immediate( - bx.extract_element(llval, bx.cx().c_usize(i as u64))) + bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -459,7 +459,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. let layout = bx.cx().layout_of(ty); PlaceRef::new_sized( - bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))), + bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index ee1ceb4d6f714..464b90ea33669 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -68,7 +68,7 @@ impl PlaceRef<'tcx, &'ll Value> { let llval = unsafe { LLVMConstInBoundsGEP( consts::bitcast(base_addr, bx.cx().i8p()), - &bx.cx().c_usize(offset.bytes()), + &bx.cx().const_usize(offset.bytes()), 1, )}; let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx()))); @@ -102,7 +102,7 @@ impl PlaceRef<'tcx, &'ll Value> { assert_eq!(count, 0); self.llextra.unwrap() } else { - cx.c_usize(count) + cx.const_usize(count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) @@ -246,7 +246,7 @@ impl PlaceRef<'tcx, &'ll Value> { let meta = self.llextra; - let unaligned_offset = cx.c_usize(offset.bytes()); + let unaligned_offset = cx.const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -257,7 +257,7 @@ impl PlaceRef<'tcx, &'ll Value> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, cx.c_usize(1u64)); + let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64)); let offset = bx.and(bx.add(unaligned_offset, align_sub_1), bx.neg(unsized_align)); @@ -287,14 +287,14 @@ impl PlaceRef<'tcx, &'ll Value> { ) -> 
&'ll Value { let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx()); if self.layout.abi.is_uninhabited() { - return bx.cx().c_undef(cast_to); + return bx.cx().const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, |def| def.discriminant_for_variant(bx.cx().tcx, index).val); - return bx.cx().c_uint_big(cast_to, discr_val); + return bx.cx().const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, @@ -326,21 +326,21 @@ impl PlaceRef<'tcx, &'ll Value> { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { // HACK(eddyb) Using `c_null` as it works on all types. - bx.cx().c_null(niche_llty) + bx.cx().const_null(niche_llty) } else { - bx.cx().c_uint_big(niche_llty, niche_start) + bx.cx().const_uint_big(niche_llty, niche_start) }; bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), - bx.cx().c_uint(cast_to, *niche_variants.start() as u64), - bx.cx().c_uint(cast_to, dataful_variant as u64)) + bx.cx().const_uint(cast_to, *niche_variants.start() as u64), + bx.cx().const_uint(cast_to, dataful_variant as u64)) } else { // Rebase from niche values to discriminant values. 
let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); - let lldiscr = bx.sub(lldiscr, bx.cx().c_uint_big(niche_llty, delta)); - let lldiscr_max = bx.cx().c_uint(niche_llty, *niche_variants.end() as u64); + let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr_max = bx.cx().const_uint(niche_llty, *niche_variants.end() as u64); bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), bx.intcast(lldiscr, cast_to, false), - bx.cx().c_uint(cast_to, dataful_variant as u64)) + bx.cx().const_uint(cast_to, dataful_variant as u64)) } } } @@ -362,7 +362,7 @@ impl PlaceRef<'tcx, &'ll Value> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - bx.cx().c_uint_big(ptr.layout.llvm_type(bx.cx()), to), + bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to), ptr.llval, ptr.align); } @@ -378,10 +378,10 @@ impl PlaceRef<'tcx, &'ll Value> { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8())); - let fill_byte = bx.cx().c_u8(0); + let fill_byte = bx.cx().const_u8(0); let (size, align) = self.layout.size_and_align(); - let size = bx.cx().c_usize(size.bytes()); - let align = bx.cx().c_u32(align.abi() as u32); + let size = bx.cx().const_usize(size.bytes()); + let align = bx.cx().const_u32(align.abi() as u32); base::call_memset(bx, llptr, fill_byte, size, align, false); } @@ -392,9 +392,9 @@ impl PlaceRef<'tcx, &'ll Value> { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `c_null` as it works on all types. 
- bx.cx().c_null(niche_llty) + bx.cx().const_null(niche_llty) } else { - bx.cx().c_uint_big(niche_llty, niche_value) + bx.cx().const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -405,7 +405,7 @@ impl PlaceRef<'tcx, &'ll Value> { pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) -> PlaceRef<'tcx, &'ll Value> { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[bx.cx().c_usize(0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, layout: self.layout.field(bx.cx(), 0), align: self.align @@ -480,7 +480,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // so we generate an abort let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))); + let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -513,27 +513,27 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = bx.cx().c_usize(offset as u64); + let lloffset = bx.cx().const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = bx.cx().c_usize(offset as u64); + let lloffset = bx.cx().const_usize(offset as u64); let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - bx.cx().c_usize(from as u64)); + bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = 
Some(bx.sub(cg_base.llextra.unwrap(), - bx.cx().c_usize((from as u64) + (to as u64)))); + bx.cx().const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index a8db74357d1a6..49d00e3bf91c7 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -102,15 +102,15 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return bx; } - let start = dest.project_index(&bx, bx.cx().c_usize(0)).llval; + let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = bx.cx().c_i32(dest.align.abi() as i32); - let size = bx.cx().c_usize(dest.layout.size.bytes()); + let align = bx.cx().const_i32(dest.align.abi() as i32); + let size = bx.cx().const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { - let fill = bx.cx().c_u8(0); + let fill = bx.cx().const_u8(0); base::call_memset(&bx, start, fill, size, align, false); return bx; } @@ -123,7 +123,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - let count = bx.cx().c_usize(count); + let count = bx.cx().const_usize(count); let end = dest.project_index(&bx, count).llval; let header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -139,7 +139,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { cg_elem.val.store(&body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[bx.cx().c_usize(1)]); + let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -291,7 +291,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { assert!(cast.is_llvm_immediate()); let ll_t_out = cast.immediate_llvm_type(bx.cx()); if operand.layout.abi.is_uninhabited() { - 
let val = OperandValue::Immediate(bx.cx().c_undef(ll_t_out)); + let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); return (bx, OperandRef { val, layout: cast, @@ -307,7 +307,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let discr_val = def .discriminant_for_variant(bx.cx().tcx, index) .val; - let discr = bx.cx().c_uint_big(ll_t_out, discr_val); + let discr = bx.cx().const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -338,7 +338,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { base::call_assume(&bx, bx.icmp( IntPredicate::IntULE, llval, - bx.cx().c_uint_big(ll_t_in, *scalar.valid_range.end()) + bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()) )); } } @@ -489,7 +489,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bx.cx().type_is_sized(ty)); - let val = bx.cx().c_usize(bx.cx().size_of(ty).bytes()); + let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -500,8 +500,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let (size, align) = bx.cx().size_and_align_of(content_ty); - let llsize = bx.cx().c_usize(size.bytes()); - let llalign = bx.cx().c_usize(align.abi()); + let llsize = bx.cx().const_usize(size.bytes()); + let llalign = bx.cx().const_usize(align.abi()); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); let llty_ptr = box_layout.llvm_type(bx.cx()); @@ -548,7 +548,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { let n = n.unwrap_usize(bx.cx().tcx); - return bx.cx().c_usize(n); + return bx.cx().const_usize(n); } } } @@ -606,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::BinOp::Shr => 
common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - bx.cx().c_bool(match op { + bx.cx().const_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -685,7 +685,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // while the current crate doesn't use overflow checks. if !bx.cx().check_overflow { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, bx.cx().c_bool(false)); + return OperandValue::Pair(val, bx.cx().const_bool(false)); } let (val, of) = match op { @@ -709,7 +709,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().c_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -838,9 +838,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = bx.cx().c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); - let infinity_bits = bx.cx().c_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) } else { @@ -918,8 +918,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, let float_bits_to_llval = |bits| { let bits_llval = match 
bx.cx().float_width(float_ty) { - 32 => bx.cx().c_u32(bits as u32), - 64 => bx.cx().c_u64(bits as u64), + 32 => bx.cx().const_u32(bits as u32), + 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; consts::bitcast(bits_llval, float_ty) @@ -974,8 +974,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = bx.cx().c_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = bx.cx().c_uint_big(int_ty, int_min(signed, int_ty) as u128); + let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty)); + let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -984,7 +984,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // Therefore we only need to execute this step for signed integer types. 
if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().c_uint(int_ty, 0)) + bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0)) } else { s1 } From 49e58742b6a422c5408cb1c75a9af7b4d091dc8e Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 13:52:15 -0700 Subject: [PATCH 34/76] Prefixed type methods & removed trait impl for write::CodegenContext --- src/librustc_codegen_llvm/abi.rs | 32 ++--- src/librustc_codegen_llvm/asm.rs | 4 +- src/librustc_codegen_llvm/back/write.rs | 25 +--- src/librustc_codegen_llvm/base.rs | 31 ++--- src/librustc_codegen_llvm/builder.rs | 18 +-- src/librustc_codegen_llvm/common.rs | 14 +-- src/librustc_codegen_llvm/consts.rs | 6 +- src/librustc_codegen_llvm/context.rs | 52 ++++---- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 42 +++---- src/librustc_codegen_llvm/intrinsic.rs | 90 ++++++------- src/librustc_codegen_llvm/meth.rs | 6 +- src/librustc_codegen_llvm/mir/block.rs | 10 +- src/librustc_codegen_llvm/mir/constant.rs | 8 +- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 4 +- src/librustc_codegen_llvm/mir/place.rs | 25 ++-- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- src/librustc_codegen_llvm/type_.rs | 118 +++++++++--------- src/librustc_codegen_llvm/type_of.rs | 44 +++---- 20 files changed, 262 insertions(+), 273 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index b7b9cddcb0bfc..2ccd76228afba 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -112,16 +112,16 @@ pub trait LlvmType { impl LlvmType for Reg { fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { match self.kind { - RegKind::Integer => cx.ix(self.size.bits()), + RegKind::Integer => cx.type_ix(self.size.bits()), RegKind::Float => { match self.size.bits() { 
- 32 => cx.f32(), - 64 => cx.f64(), + 32 => cx.type_f32(), + 64 => cx.type_f64(), _ => bug!("unsupported float: {:?}", self) } } RegKind::Vector => { - cx.vector(cx.i8(), self.size.bytes()) + cx.type_vector(cx.type_i8(), self.size.bytes()) } } } @@ -145,7 +145,7 @@ impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return cx.array(rest_ll_unit, rest_count); + return cx.type_array(rest_ll_unit, rest_count); } } @@ -160,10 +160,10 @@ impl LlvmType for CastTarget { if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(cx.ix(rem_bytes * 8)); + args.push(cx.type_ix(rem_bytes * 8)); } - cx.struct_(&args, false) + cx.type_struct(&args, false) } } @@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bx.pointercast(dst.llval, cx.ptr_to(cast.llvm_type(cx))); + let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx))); bx.store(val, cast_dst, self.layout.align); } else { // The actual return type is a struct, but the ABI @@ -240,8 +240,8 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // ...and then memcpy it to the intended destination. base::call_memcpy(bx, - bx.pointercast(dst.llval, cx.i8p()), - bx.pointercast(llscratch, cx.i8p()), + bx.pointercast(dst.llval, cx.type_i8p()), + bx.pointercast(llscratch, cx.type_i8p()), cx.const_usize(self.layout.size.bytes()), self.layout.align.min(scratch_align), MemFlags::empty()); @@ -606,14 +606,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => cx.void(), + PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) 
=> { self.ret.layout.immediate_llvm_type(cx) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) => { - llargument_tys.push(cx.ptr_to(self.ret.memory_ty(cx))); - cx.void() + llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + cx.type_void() } }; @@ -639,15 +639,15 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => cx.ptr_to(arg.memory_ty(cx)), + PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } if self.variadic { - cx.variadic_func(&llargument_tys, llreturn_ty) + cx.type_variadic_func(&llargument_tys, llreturn_ty) } else { - cx.func(&llargument_tys, llreturn_ty) + cx.type_func(&llargument_tys, llreturn_ty) } } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 3b585b807f799..77ce46594418a 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -75,9 +75,9 @@ pub fn codegen_inline_asm( // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => bx.cx().void(), + 0 => bx.cx().type_void(), 1 => output_types[0], - _ => bx.cx().struct_(&output_types, false) + _ => bx.cx().type_struct(&output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index c6b81af59524c..75218a8de52ee 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -27,7 +27,7 @@ use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitize use rustc::session::Session; use rustc::util::nodemap::FxHashMap; use time_graph::{self, TimeGraph, Timeline}; -use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock}; +use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use 
llvm_util; use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, CachedModuleCodegen}; @@ -46,7 +46,6 @@ use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; use type_::Type; use context::{is_pie_binary, get_reloc_model}; -use interfaces::{Backend, CommonWriteMethods}; use common; use jobserver::{Client, Acquired}; use rustc_demangle; @@ -425,15 +424,8 @@ impl CodegenContext<'ll> { } } -impl<'ll> Backend for CodegenContext<'ll> { - type Value = &'ll Value; - type BasicBlock = &'ll BasicBlock; - type Type = &'ll Type; - type Context = &'ll llvm::Context; - type TypeKind = llvm::TypeKind; -} -impl CommonWriteMethods for CodegenContext<'ll> { +impl CodegenContext<'ll> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } @@ -442,18 +434,7 @@ impl CommonWriteMethods for CodegenContext<'ll> { common::const_bytes_in_context(llcx, bytes) } - fn const_struct_in_context( - &self, - llcx: &'a llvm::Context, - elts: &[&'a Value], - packed: bool, - ) -> &'a Value { - common::const_struct_in_context(llcx, elts, packed) - } -} - -impl CodegenContext<'ll> { - pub fn ptr_to(&self, ty: &'ll Type) -> &'ll Type { + pub fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { unsafe { llvm::LLVMPointerType(ty, 0) } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 43dc280049e68..493634d92c5ce 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -235,13 +235,13 @@ pub fn unsize_thin_ptr( (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. 
})) => { assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { @@ -354,10 +354,10 @@ fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_, &'ll Value>, if op.is_shift() { let mut rhs_llty = bx.cx().val_ty(rhs); let mut lhs_llty = bx.cx().val_ty(lhs); - if bx.cx().kind(rhs_llty) == TypeKind::Vector { + if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { rhs_llty = bx.cx().element_type(rhs_llty) } - if bx.cx().kind(lhs_llty) == TypeKind::Vector { + if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { lhs_llty = bx.cx().element_type(lhs_llty) } let rhs_sz = bx.cx().int_width(rhs_llty); @@ -394,8 +394,8 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( bx: &Builder<'_ ,'ll, '_, &'ll Value>, val: &'ll Value ) -> &'ll Value { - if bx.cx().val_ty(val) == bx.cx().i1() { - bx.zext(val, bx.cx().i8()) + if bx.cx().val_ty(val) == bx.cx().type_i1() { + bx.zext(val, bx.cx().type_i8()) } else { val } @@ -418,7 +418,7 @@ pub fn to_immediate_scalar( scalar: &layout::Scalar, ) -> &'ll Value { if scalar.is_bool() { - return bx.trunc(val, bx.cx().i1()); + return bx.trunc(val, bx.cx().type_i1()); } val } @@ -434,7 +434,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
let val = bx.load(src, align); - let ptr = bx.pointercast(dst, bx.cx().ptr_to(bx.cx().val_ty(val))); + let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val))); bx.store_with_flags(val, ptr, align, flags); return; } @@ -442,8 +442,8 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( let ptr_width = &cx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = cx.get_intrinsic(&key); - let src_ptr = bx.pointercast(src, cx.i8p()); - let dst_ptr = bx.pointercast(dst, cx.i8p()); + let src_ptr = bx.pointercast(src, cx.type_i8p()); + let dst_ptr = bx.pointercast(dst, cx.type_i8p()); let size = bx.intcast(n_bytes, cx.isize_ty, false); let align = cx.const_i32(align.abi() as i32); let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); @@ -555,7 +555,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { use_start_lang_item: bool, ) { let llfty = - cx.func(&[cx.t_int(), cx.ptr_to(cx.i8p())], cx.t_int()); + cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); // Given that `main()` has no arguments, @@ -598,7 +598,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { start_def_id, cx.tcx.intern_substs(&[main_ret_ty.into()]), ); - (start_fn, vec![bx.pointercast(rust_main, cx.ptr_to(cx.i8p())), + (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv]) } else { debug!("using user-defined start fn"); @@ -606,7 +606,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { }; let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, cx.t_int(), true)); + bx.ret(bx.intcast(result, cx.type_int(), true)); } } @@ -1248,7 +1248,10 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if !cx.used_statics.borrow().is_empty() { let name = const_cstr!("llvm.used"); let section = const_cstr!("llvm.metadata"); 
- let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow()); + let array = cx.const_array( + &cx.type_ptr_to(cx.type_i8()), + &*cx.used_statics.borrow() + ); unsafe { let g = llvm::LLVMAddGlobal(cx.llmod, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index c0421a565509c..163b6091359aa 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -786,7 +786,7 @@ impl BuilderMethods<'a, 'll, 'tcx> }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = &self.cx().func(&argtys[..], output); + let fty = &self.cx().type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); @@ -864,9 +864,9 @@ impl BuilderMethods<'a, 'll, 'tcx> fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); - let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); - let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64); + let vec_i32_ty = &self.cx().type_vector(&self.cx().type_i32(), num_elts as u64); self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty)) } } @@ -1176,9 +1176,9 @@ impl BuilderMethods<'a, 'll, 'tcx> ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.ptr_to(stored_ty); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer); + assert_eq!(self.cx.type_kind(dest_ptr_ty), llvm::TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1197,14 +1197,14 @@ impl BuilderMethods<'a, 'll, 'tcx> args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { let mut fn_ty = 
self.cx.val_ty(llfn); // Strip off pointers - while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer { + while self.cx.type_kind(fn_ty) == llvm::TypeKind::Pointer { fn_ty = self.cx.element_type(fn_ty); } - assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function, + assert!(self.cx.type_kind(fn_ty) == llvm::TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); - let param_tys = self.cx.func_params(fn_ty); + let param_tys = self.cx.func_params_types(fn_ty); let all_args_match = param_tys.iter() .zip(args.iter().map(|&v| self.cx().val_ty(v))) @@ -1261,7 +1261,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, self.cx.i8p()); + let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index d09b892ebf366..6b87e5332f2fe 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -238,19 +238,19 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn const_bool(&self, val: bool) -> &'ll Value { - &self.const_uint(&self.i1(), val as u64) + &self.const_uint(&self.type_i1(), val as u64) } fn const_i32(&self, i: i32) -> &'ll Value { - &self.const_int(&self.i32(), i as i64) + &self.const_int(&self.type_i32(), i as i64) } fn const_u32(&self, i: u32) -> &'ll Value { - &self.const_uint(&self.i32(), i as u64) + &self.const_uint(&self.type_i32(), i as u64) } fn const_u64(&self, i: u64) -> &'ll Value { - &self.const_uint(&self.i64(), i) + &self.const_uint(&self.type_i64(), i) } fn const_usize(&self, i: u64) -> &'ll Value { @@ -264,7 +264,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn const_u8(&self, i: u8) -> &'ll Value { - &self.const_uint(&self.i8(), i as u64) + &self.const_uint(&self.type_i8(), i as u64) } @@ -302,7 +302,7 @@ 
impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { let len = s.len(); let cs = consts::ptrcast(&self.const_cstr(s, false), - &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self))); + &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self))); &self.const_fat_ptr(cs, &self.const_usize(len as u64)) } @@ -507,7 +507,7 @@ pub fn shift_mask_val( mask_llty: &'ll Type, invert: bool ) -> &'ll Value { - let kind = bx.cx().kind(llty); + let kind = bx.cx().type_kind(llty); match kind { TypeKind::Integer => { // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index d0cf751b79ef1..4add74ac3b4ea 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -313,8 +313,8 @@ pub fn codegen_static<'a, 'tcx>( // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected let mut val_llty = cx.val_ty(v); - let v = if val_llty == cx.i1() { - val_llty = cx.i8(); + let v = if val_llty == cx.type_i1() { + val_llty = cx.type_i8(); llvm::LLVMConstZExt(v, val_llty) } else { v @@ -432,7 +432,7 @@ pub fn codegen_static<'a, 'tcx>( if attrs.flags.contains(CodegenFnAttrFlags::USED) { // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, cx.i8p()); + let cast = llvm::LLVMConstPointerCast(g, cx.type_i8p()); cx.used_statics.borrow_mut().push(cast); } } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index cf9e2ab4c6ad8..1b7b6953c6410 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -381,7 +381,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } else { "rust_eh_personality" }; - let fty = &self.variadic_func(&[], &self.i32()); + 
let fty = &self.type_variadic_func(&[], &self.type_i32()); declare::declare_cfn(self, name, fty) } }; @@ -489,7 +489,7 @@ fn declare_intrinsic( macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, cx.func(&[], $ret)); + let f = declare::declare_cfn(cx, $name, cx.type_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -497,7 +497,7 @@ fn declare_intrinsic( ); ($name:expr, fn(...) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, cx.variadic_func(&[], $ret)); + let f = declare::declare_cfn(cx, $name, cx.type_variadic_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -505,7 +505,7 @@ fn declare_intrinsic( ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(cx, $name, cx.func(&[$($arg),*], $ret)); + let f = declare::declare_cfn(cx, $name, cx.type_func(&[$($arg),*], $ret)); llvm::SetUnnamedAddr(f, false); cx.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -513,28 +513,28 @@ fn declare_intrinsic( ); } macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (cx.struct_( &[$($field_ty),*], false)) + ($($field_ty:expr),*) => (cx.type_struct( &[$($field_ty),*], false)) } - let i8p = cx.i8p(); - let void = cx.void(); - let i1 = cx.i1(); - let t_i8 = cx.i8(); - let t_i16 = cx.i16(); - let t_i32 = cx.i32(); - let t_i64 = cx.i64(); - let t_i128 = cx.i128(); - let t_f32 = cx.f32(); - let t_f64 = cx.f64(); - - let t_v2f32 = cx.vector(t_f32, 2); - let t_v4f32 = cx.vector(t_f32, 4); - let t_v8f32 = cx.vector(t_f32, 8); - let t_v16f32 = cx.vector(t_f32, 16); - - let t_v2f64 = cx.vector(t_f64, 2); - let t_v4f64 = cx.vector(t_f64, 4); - let t_v8f64 = cx.vector(t_f64, 8); + let i8p = cx.type_i8p(); + let void = cx.type_void(); + let i1 = cx.type_i1(); + let t_i8 = cx.type_i8(); + let t_i16 = cx.type_i16(); + let t_i32 = cx.type_i32(); + let t_i64 = cx.type_i64(); + let t_i128 = cx.type_i128(); + let t_f32 = cx.type_f32(); + let t_f64 = cx.type_f64(); + + let t_v2f32 = cx.type_vector(t_f32, 2); + let t_v4f32 = cx.type_vector(t_f32, 4); + let t_v8f32 = cx.type_vector(t_f32, 8); + let t_v16f32 = cx.type_vector(t_f32, 16); + + let t_v2f64 = cx.type_vector(t_f64, 2); + let t_v4f64 = cx.type_vector(t_f64, 4); + let t_v8f64 = cx.type_vector(t_f64, 8); ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); @@ -781,8 +781,8 @@ fn declare_intrinsic( ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(cx.metadata(), cx.metadata()) -> void); - ifn!("llvm.dbg.value", fn(cx.metadata(), t_i64, cx.metadata()) -> void); + ifn!("llvm.dbg.declare", fn(cx.type_metadata(), cx.type_metadata()) -> void); + ifn!("llvm.dbg.value", fn(cx.type_metadata(), t_i64, cx.type_metadata()) -> void); } None diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index ccfd7d8920eb8..a0f440fea9d90 100644 
--- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -55,7 +55,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = cx.array(cx.i8(), + let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); let section_var = declare::define_global(cx, section_var_name, diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 7a2080e1a2671..31022140519b7 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -11,31 +11,31 @@ use super::backend::Backend; pub trait TypeMethods : Backend { - fn void(&self) -> Self::Type; - fn metadata(&self) -> Self::Type; - fn i1(&self) -> Self::Type; - fn i8(&self) -> Self::Type; - fn i16(&self) -> Self::Type; - fn i32(&self) -> Self::Type; - fn i64(&self) -> Self::Type; - fn i128(&self) -> Self::Type; - fn ix(&self, num_bites: u64) -> Self::Type; - fn f32(&self) -> Self::Type; - fn f64(&self) -> Self::Type; - fn x86_mmx(&self) -> Self::Type; + fn type_void(&self) -> Self::Type; + fn type_metadata(&self) -> Self::Type; + fn type_i1(&self) -> Self::Type; + fn type_i8(&self) -> Self::Type; + fn type_i16(&self) -> Self::Type; + fn type_i32(&self) -> Self::Type; + fn type_i64(&self) -> Self::Type; + fn type_i128(&self) -> Self::Type; + fn type_ix(&self, num_bites: u64) -> Self::Type; + fn type_f32(&self) -> Self::Type; + fn type_f64(&self) -> Self::Type; + fn type_x86_mmx(&self) -> Self::Type; - fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; - fn variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; - fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type; - fn named_struct(&self, name: &str) -> Self::Type; - fn array(&self, ty: Self::Type, len: u64) -> Self::Type; - fn vector(&self, ty: 
Self::Type, len: u64) -> Self::Type; - fn kind(&self, ty: Self::Type) -> Self::TypeKind; + fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; + fn type_named_struct(&self, name: &str) -> Self::Type; + fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_kind(&self, ty: Self::Type) -> Self::TypeKind; fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); - fn ptr_to(&self, ty: Self::Type) -> Self::Type; + fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; fn vector_length(&self, ty: Self::Type) -> usize; - fn func_params(&self, ty: Self::Type) -> Vec; + fn func_params_types(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; fn int_width(&self, ty: Self::Type) -> u64; } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 0c4f09672f111..34edd92a2e052 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -251,7 +251,7 @@ pub fn codegen_intrinsic_call( let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, bx.cx().ptr_to(ty.llvm_type(cx))); + ptr = bx.pointercast(ptr, bx.cx().type_ptr_to(ty.llvm_type(cx))); } let load = bx.volatile_load(ptr); let align = if name == "unaligned_volatile_load" { @@ -336,7 +336,7 @@ pub fn codegen_intrinsic_call( args[1].immediate() ], None); let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), cx.bool()); + let overflow = bx.zext(bx.extract_value(pair, 1), cx.type_bool()); let dest = result.project_field(bx, 0); bx.store(val, dest.llval, dest.align); @@ -472,7 +472,7 
@@ pub fn codegen_intrinsic_call( failorder, weak); let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), bx.cx().bool()); + let success = bx.zext(bx.extract_value(pair, 1), bx.cx().type_bool()); let dest = result.project_field(bx, 0); bx.store(val, dest.llval, dest.align); @@ -562,32 +562,32 @@ pub fn codegen_intrinsic_call( ) -> Vec<&'ll Type> { use intrinsics::Type::*; match *t { - Void => vec![cx.void()], + Void => vec![cx.type_void()], Integer(_signed, _width, llvm_width) => { - vec![cx.ix( llvm_width as u64)] + vec![cx.type_ix( llvm_width as u64)] } Float(x) => { match x { - 32 => vec![cx.f32()], - 64 => vec![cx.f64()], + 32 => vec![cx.type_f32()], + 64 => vec![cx.type_f64()], _ => bug!() } } Pointer(ref t, ref llvm_elem, _const) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![cx.ptr_to(elem)] + vec![cx.type_ptr_to(elem)] } Vector(ref t, ref llvm_elem, length) => { let t = llvm_elem.as_ref().unwrap_or(t); let elem = one(ty_to_type(cx, t)); - vec![cx.vector(elem, length as u64)] + vec![cx.type_vector(elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() .map(|t| one(ty_to_type(cx, t))) .collect::>(); - vec![cx.struct_( &elems, false)] + vec![cx.type_struct( &elems, false)] } Aggregate(true, ref contents) => { contents.iter() @@ -626,20 +626,20 @@ pub fn codegen_intrinsic_call( } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); - vec![bx.pointercast(arg.immediate(), bx.cx().ptr_to(llvm_elem))] + vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); vec![ bx.bitcast(arg.immediate(), - bx.cx().vector(llvm_elem, length as u64)) + bx.cx().type_vector(llvm_elem, length as u64)) ] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM 
intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. - vec![bx.trunc(arg.immediate(), bx.cx().ix(llvm_width as u64))] + vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] } _ => vec![arg.immediate()], } @@ -661,7 +661,7 @@ pub fn codegen_intrinsic_call( intrinsics::IntrinsicDef::Named(name) => { let f = declare::declare_cfn(cx, name, - cx.func(&inputs, outputs)); + cx.type_func(&inputs, outputs)); bx.call(f, &llargs, None) } }; @@ -685,7 +685,7 @@ pub fn codegen_intrinsic_call( if !fn_ty.ret.is_ignore() { if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, cx.ptr_to(ty.llvm_type(cx))); + let ptr = bx.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx))); bx.store(llval, ptr, result.align); } else { OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) @@ -717,8 +717,8 @@ fn copy_intrinsic( let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, cx.data_layout().pointer_size.bits()); - let dst_ptr = bx.pointercast(dst, cx.i8p()); - let src_ptr = bx.pointercast(src, cx.i8p()); + let dst_ptr = bx.pointercast(dst, cx.type_i8p()); + let src_ptr = bx.pointercast(src, cx.type_i8p()); let llfn = cx.get_intrinsic(&name); bx.call(llfn, @@ -742,7 +742,7 @@ fn memset_intrinsic( let (size, align) = cx.size_and_align_of(ty); let size = cx.const_usize(size.bytes()); let align = cx.const_i32(align.abi() as i32); - let dst = bx.pointercast(dst, cx.i8p()); + let dst = bx.pointercast(dst, cx.type_i8p()); call_memset(bx, dst, val, bx.mul(size, count), align, volatile) } @@ -757,7 +757,7 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(cx.const_null(cx.i8p()), dest, ptr_align); + bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { @@ -833,7 +833,7 @@ fn 
codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. - let i64p = cx.ptr_to(cx.i64()); + let i64p = cx.type_ptr_to(cx.type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -924,12 +924,12 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. - let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false); + let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false); let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); - catch.add_clause(vals, bx.cx().const_null(cx.i8p())); + catch.add_clause(vals, bx.cx().const_null(cx.type_i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; - catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align); + catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align); catch.ret(cx.const_i32(1)); }); @@ -1072,7 +1072,7 @@ fn generic_simd_intrinsic( found `{}` with length {}", in_len, in_ty, ret_ty, out_len); - require!(bx.cx().kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer, + require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer, "expected return type with integer elements, found `{}` with non-integer `{}`", ret_ty, ret_ty.simd_type(tcx)); @@ -1161,8 +1161,8 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = bx.cx().i1(); - let i1xn = bx.cx().vector(i1, m_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1294,16 
+1294,16 @@ fn generic_simd_intrinsic( mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { - ty::Int(v) => cx.int_from_ty( v), - ty::Uint(v) => cx.uint_from_ty( v), - ty::Float(v) => cx.float_from_ty( v), + ty::Int(v) => cx.type_int_from_ty( v), + ty::Uint(v) => cx.type_uint_from_ty( v), + ty::Float(v) => cx.type_float_from_ty( v), _ => unreachable!(), }; while no_pointers > 0 { - elem_ty = cx.ptr_to(elem_ty); + elem_ty = cx.type_ptr_to(elem_ty); no_pointers -= 1; } - cx.vector(elem_ty, vec_len as u64) + cx.type_vector(elem_ty, vec_len as u64) } @@ -1380,13 +1380,13 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = bx.cx().i32(); + let alignment_ty = bx.cx().type_i32(); let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = bx.cx().i1(); - let i1xn = bx.cx().vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; @@ -1401,7 +1401,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, - bx.cx().func(&[ + bx.cx().type_func(&[ llvm_pointer_vec_ty, alignment_ty, mask_ty, @@ -1480,17 +1480,17 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = bx.cx().i32(); + let alignment_ty = bx.cx().type_i32(); let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = bx.cx().i1(); - let i1xn = bx.cx().vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = 
bx.cx().void(); + let ret_t = bx.cx().type_void(); // Type of the vector of pointers: let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); @@ -1503,7 +1503,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, - bx.cx().func(&[llvm_elem_vec_ty, + bx.cx().type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1559,8 +1559,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => bx.cx().const_undef(bx.cx().f32()), - 64 => bx.cx().const_undef(bx.cx().f64()), + 32 => bx.cx().const_undef(bx.cx().type_f32()), + 64 => bx.cx().const_undef(bx.cx().type_f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1637,8 +1637,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = bx.cx().i1(); - let i1xn = bx.cx().vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1648,7 +1648,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, bx.cx().bool()) + bx.zext(r, bx.cx().type_bool()) } ) }, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 42e8b17d6ad80..ad53e0c7afd68 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -42,7 +42,7 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast( llvtable, - bx.cx().ptr_to(bx.cx().ptr_to(fn_ty.llvm_type(bx.cx()))) + bx.cx().type_ptr_to(bx.cx().type_ptr_to(fn_ty.llvm_type(bx.cx()))) ); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( @@ -63,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex { 
// Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize())); + let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize())); let usize_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]), @@ -98,7 +98,7 @@ pub fn get_vtable( } // Not in the cache. Build it. - let nullptr = cx.const_null(cx.i8p()); + let nullptr = cx.const_null(cx.type_i8p()); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 90507d4cdaa50..7b6abbcb107bd 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -268,7 +268,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; bx.load( - bx.pointercast(llslot, bx.cx().ptr_to(cast_ty.llvm_type(bx.cx()))), + bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))), self.fn_ty.ret.layout.align) } }; @@ -560,7 +560,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx()))) + bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx()))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -760,7 +760,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, bx.cx().ptr_to(ty.llvm_type(bx.cx()))), + llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))), align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument @@ -861,7 +861,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { fn landing_pad_type(&self) -> &'ll Type { let cx = self.cx; - cx.struct_( &[cx.i8p(), cx.i32()], false) + cx.type_struct( &[cx.type_i8p(), cx.type_i32()], false) } fn unreachable_block(&mut self) -> &'ll BasicBlock { @@ -973,7 +973,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { dst: PlaceRef<'tcx, &'ll Value>) { let src = self.codegen_operand(bx, src); let llty = src.layout.llvm_type(bx.cx()); - let cast_ptr = bx.pointercast(dst.llval, bx.cx().ptr_to(llty)); + let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index fe1d71d2d5b55..ef06eee18b9cc 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -41,11 +41,11 @@ pub fn scalar_to_llvm( match cv { Scalar::Bits { size: 0, .. 
} => { assert_eq!(0, layout.value.size(cx).bytes()); - cx.const_undef(cx.ix(0)) + cx.const_undef(cx.type_ix(0)) }, Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = cx.const_uint_big(cx.ix(bitsize), bits); + let llval = cx.const_uint_big(cx.type_ix(bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { @@ -73,7 +73,7 @@ pub fn scalar_to_llvm( None => bug!("missing allocation {:?}", ptr.alloc_id), }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, cx.i8p()), + consts::bitcast(base_addr, cx.type_i8p()), &cx.const_usize(ptr.offset.bytes()), 1, ) }; @@ -110,7 +110,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati value: layout::Primitive::Pointer, valid_range: 0..=!0 }, - cx.i8p() + cx.type_i8p() )); next_offset = offset + pointer_size; } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 40f8de6b81355..507ebf9e77d3a 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -418,7 +418,7 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. 
- let null = bx.cx().const_null(bx.cx().i8p()); + let null = bx.cx().const_null(bx.cx().type_i8p()); let sixty_four = bx.cx().const_i32(64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 75b2f7faf7191..20a1136efd263 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -348,7 +348,7 @@ impl OperandValue<&'ll Value> { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().i8(), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); base::call_memcpy(bx, lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. @@ -459,7 +459,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. 
let layout = bx.cx().layout_of(ty); PlaceRef::new_sized( - bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))), + bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))), layout, layout.align, ).load(bx) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 464b90ea33669..994df238f0551 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -67,11 +67,11 @@ impl PlaceRef<'tcx, &'ll Value> { let base_addr = consts::addr_of(bx.cx(), init, layout.align, None); let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, bx.cx().i8p()), + consts::bitcast(base_addr, bx.cx().type_i8p()), &bx.cx().const_usize(offset.bytes()), 1, )}; - let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx()))); + let llval = consts::bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))); PlaceRef::new_sized(llval, layout, alloc.align) } @@ -159,7 +159,7 @@ impl PlaceRef<'tcx, &'ll Value> { let load = bx.load(llptr, self.align); scalar_load_metadata(load, scalar); if scalar.is_bool() { - bx.trunc(load, bx.cx().i1()) + bx.trunc(load, bx.cx().type_i1()) } else { load } @@ -195,7 +195,7 @@ impl PlaceRef<'tcx, &'ll Value> { }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))), + llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))), llextra: if cx.type_has_metadata(field.ty) { self.llextra } else { @@ -264,7 +264,7 @@ impl PlaceRef<'tcx, &'ll Value> { debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, cx.i8p()); + let byte_ptr = bx.pointercast(self.llval, cx.type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected @@ -272,7 +272,7 @@ impl PlaceRef<'tcx, &'ll Value> { debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)), + llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -377,7 +377,10 @@ impl PlaceRef<'tcx, &'ll Value> { bx.sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. - let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8())); + let llptr = bx.pointercast( + self.llval, + bx.cx().type_ptr_to(bx.cx().type_i8()) + ); let fill_byte = bx.cx().const_u8(0); let (size, align) = self.layout.size_and_align(); let size = bx.cx().const_usize(size.bytes()); @@ -419,7 +422,7 @@ impl PlaceRef<'tcx, &'ll Value> { // Cast to the appropriate variant struct type. 
let variant_ty = downcast.layout.llvm_type(bx.cx()); - downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty)); + downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); downcast } @@ -480,7 +483,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // so we generate an abort let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))); + let llval = bx.cx().const_undef( + bx.cx().type_ptr_to(layout.llvm_type(bx.cx())) + ); PlaceRef::new_sized(llval, layout, layout.align) } } @@ -539,7 +544,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). subslice.llval = bx.pointercast(subslice.llval, - bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx()))); + bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx()))); subslice } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 49d00e3bf91c7..f302c0b6fa5ad 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -117,7 +117,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); - if bx.cx().val_ty(v) == bx.cx().i8() { + if bx.cx().val_ty(v) == bx.cx().type_i8() { base::call_memset(&bx, start, v, size, align, false); return bx; } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 50ad014bb9e1f..33d99148bfd2d 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -44,82 +44,82 @@ impl fmt::Debug for Type { impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - fn void(&self) -> &'ll Type { + fn type_void(&self) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(&self.llcx) } } - fn metadata(&self) -> &'ll Type { + fn type_metadata(&self) -> &'ll Type { unsafe { 
llvm::LLVMRustMetadataTypeInContext(self.llcx) } } - fn i1(&self) -> &'ll Type { + fn type_i1(&self) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(&self.llcx) } } - fn i8(&self) -> &'ll Type { + fn type_i8(&self) -> &'ll Type { unsafe { llvm::LLVMInt8TypeInContext(&self.llcx) } } - fn i16(&self) -> &'ll Type { + fn type_i16(&self) -> &'ll Type { unsafe { llvm::LLVMInt16TypeInContext(&self.llcx) } } - fn i32(&self) -> &'ll Type { + fn type_i32(&self) -> &'ll Type { unsafe { llvm::LLVMInt32TypeInContext(&self.llcx) } } - fn i64(&self) -> &'ll Type { + fn type_i64(&self) -> &'ll Type { unsafe { llvm::LLVMInt64TypeInContext(&self.llcx) } } - fn i128(&self) -> &'ll Type { + fn type_i128(&self) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(&self.llcx, 128) } } // Creates an integer type with the given number of bits, e.g. i24 - fn ix(&self, num_bits: u64) -> &'ll Type { + fn type_ix(&self, num_bits: u64) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(&self.llcx, num_bits as c_uint) } } - fn f32(&self) -> &'ll Type { + fn type_f32(&self) -> &'ll Type { unsafe { llvm::LLVMFloatTypeInContext(&self.llcx) } } - fn f64(&self) -> &'ll Type { + fn type_f64(&self) -> &'ll Type { unsafe { llvm::LLVMDoubleTypeInContext(&self.llcx) } } - fn x86_mmx(&self) -> &'ll Type { + fn type_x86_mmx(&self) -> &'ll Type { unsafe { llvm::LLVMX86MMXTypeInContext(&self.llcx) } } - fn func( + fn type_func( &self, args: &[&'ll Type], ret: &'ll Type @@ -130,7 +130,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn variadic_func( + fn type_variadic_func( &self, args: &[&'ll Type], ret: &'ll Type @@ -141,7 +141,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn struct_( + fn type_struct( &self, els: &[&'ll Type], packed: bool @@ -153,7 +153,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn named_struct(&self, name: &str) -> &'ll Type { + fn type_named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); unsafe 
{ llvm::LLVMStructCreateNamed(&self.llcx, name.as_ptr()) @@ -161,19 +161,19 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } - fn array(&self, ty: &'ll Type, len: u64) -> &'ll Type { + fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - fn vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { + fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - fn kind(&self, ty: &'ll Type) -> TypeKind { + fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(ty) } @@ -186,7 +186,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn ptr_to(&self, ty: &'ll Type) -> &'ll Type { + fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { unsafe { llvm::LLVMPointerType(ty, 0) } @@ -205,7 +205,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } - fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> { + fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { let n_args = llvm::LLVMCountParamTypes(ty) as usize; let mut args = Vec::with_capacity(n_args); @@ -216,7 +216,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn float_width(&self, ty : &'ll Type) -> usize { - match &self.kind(ty) { + match &self.type_kind(ty) { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -251,97 +251,97 @@ impl Type { } pub fn i8p_llcx(cx : &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type { - cx.ptr_to(Type::i8_llcx(llcx)) + cx.type_ptr_to(Type::i8_llcx(llcx)) } } impl CodegenCx<'ll, 'tcx, &'ll Value> { - pub fn bool(&self) -> &'ll Type { - &self.i8() + pub fn type_bool(&self) -> &'ll Type { + &self.type_i8() } - pub fn char(&self) -> &'ll Type { - &self.i32() + pub fn type_char(&self) -> &'ll Type { + &self.type_i32() } - pub fn i8p(&self) -> &'ll Type { - &self.ptr_to(&self.i8()) + pub fn type_i8p(&self) -> &'ll Type { + 
&self.type_ptr_to(&self.type_i8()) } - pub fn isize(&self) -> &'ll Type { + pub fn type_isize(&self) -> &'ll Type { &self.isize_ty } - pub fn t_int(&self) -> &'ll Type { + pub fn type_int(&self) -> &'ll Type { match &self.sess().target.target.target_c_int_width[..] { - "16" => &self.i16(), - "32" => &self.i32(), - "64" => &self.i64(), + "16" => &self.type_i16(), + "32" => &self.type_i32(), + "64" => &self.type_i64(), width => bug!("Unsupported target_c_int_width: {}", width), } } - pub fn int_from_ty( + pub fn type_int_from_ty( &self, t: ast::IntTy ) -> &'ll Type { match t { ast::IntTy::Isize => &self.isize_ty, - ast::IntTy::I8 => &self.i8(), - ast::IntTy::I16 => &self.i16(), - ast::IntTy::I32 => &self.i32(), - ast::IntTy::I64 => &self.i64(), - ast::IntTy::I128 => &self.i128(), + ast::IntTy::I8 => &self.type_i8(), + ast::IntTy::I16 => &self.type_i16(), + ast::IntTy::I32 => &self.type_i32(), + ast::IntTy::I64 => &self.type_i64(), + ast::IntTy::I128 => &self.type_i128(), } } - pub fn uint_from_ty( + pub fn type_uint_from_ty( &self, t: ast::UintTy ) -> &'ll Type { match t { ast::UintTy::Usize => &self.isize_ty, - ast::UintTy::U8 => &self.i8(), - ast::UintTy::U16 => &self.i16(), - ast::UintTy::U32 => &self.i32(), - ast::UintTy::U64 => &self.i64(), - ast::UintTy::U128 => &self.i128(), + ast::UintTy::U8 => &self.type_i8(), + ast::UintTy::U16 => &self.type_i16(), + ast::UintTy::U32 => &self.type_i32(), + ast::UintTy::U64 => &self.type_i64(), + ast::UintTy::U128 => &self.type_i128(), } } - pub fn float_from_ty( + pub fn type_float_from_ty( &self, t: ast::FloatTy ) -> &'ll Type { match t { - ast::FloatTy::F32 => &self.f32(), - ast::FloatTy::F64 => &self.f64(), + ast::FloatTy::F32 => &self.type_f32(), + ast::FloatTy::F64 => &self.type_f64(), } } - pub fn from_integer(&self, i: layout::Integer) -> &'ll Type { + pub fn type_from_integer(&self, i: layout::Integer) -> &'ll Type { use rustc::ty::layout::Integer::*; match i { - I8 => &self.i8(), - I16 => &self.i16(), - I32 => 
&self.i32(), - I64 => &self.i64(), - I128 => &self.i128(), + I8 => &self.type_i8(), + I16 => &self.type_i16(), + I32 => &self.type_i32(), + I64 => &self.type_i64(), + I128 => &self.type_i128(), } } /// Return a LLVM type that has at most the required alignment, /// as a conservative approximation for unknown pointee types. - pub fn pointee_for_abi_align(&self, align: Align) -> &'ll Type { + pub fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = layout::Integer::approximate_abi_align(self, align); - &self.from_integer(ity) + &self.type_from_integer(ity) } /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. - pub fn padding_filler( + pub fn type_padding_filler( &self, size: Size, align: Align @@ -350,6 +350,6 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { let size = size.bytes(); let unit_size = unit.size().bytes(); assert_eq!(size % unit_size, 0); - &self.array(&self.from_integer(unit), size / unit_size) + &self.type_array(&self.type_from_integer(unit), size / unit_size) } } diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 04de9a6676025..a5aaede1fc84c 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -39,14 +39,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, (cx.sess().target.target.arch == "x86" || cx.sess().target.target.arch == "x86_64"); if use_x86_mmx { - return cx.x86_mmx() + return cx.type_x86_mmx() } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return cx.vector(element, count); + return cx.type_vector(element, count); } } layout::Abi::ScalarPair(..) 
=> { - return cx.struct_( &[ + return cx.type_struct( &[ layout.scalar_pair_element_llvm_type(cx, 0, false), layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); @@ -81,30 +81,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = cx.padding_filler( layout.size, layout.align); + let fill = cx.type_padding_filler( layout.size, layout.align); let packed = false; match name { None => { - cx.struct_( &[fill], packed) + cx.type_struct( &[fill], packed) } Some(ref name) => { - let llty = cx.named_struct( name); + let llty = cx.type_named_struct( name); cx.set_struct_body(llty, &[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - cx.array(layout.field(cx, 0).llvm_type(cx), count) + cx.type_array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { None => { let (llfields, packed) = struct_llfields(cx, layout); - cx.struct_( &llfields, packed) + cx.type_struct( &llfields, packed) } Some(ref name) => { - let llty = cx.named_struct( name); + let llty = cx.type_named_struct( name); *defer = Some((llty, layout)); llty } @@ -138,7 +138,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); assert_eq!(offset.abi_align(padding_align) + padding, target_offset); - result.push(cx.padding_filler( padding, padding_align)); + result.push(cx.type_padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); result.push(field.llvm_type(cx)); @@ -155,7 +155,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, assert_eq!(offset.abi_align(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); - result.push(cx.padding_filler(padding, padding_align)); + 
result.push(cx.type_padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -257,17 +257,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.ptr_to(cx.layout_of(ty).llvm_type(cx)) + cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - cx.ptr_to(FnType::new(cx, sig, &[]).llvm_type(cx)) + cx.type_ptr_to(FnType::new(cx, sig, &[]).llvm_type(cx)) } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; @@ -318,7 +318,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { - return cx.i1(); + return cx.type_i1(); } } self.llvm_type(cx) @@ -327,17 +327,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { - layout::Int(i, _) => cx.from_integer( i), - layout::Float(FloatTy::F32) => cx.f32(), - layout::Float(FloatTy::F64) => cx.f64(), + layout::Int(i, _) => cx.type_from_integer( i), + layout::Float(FloatTy::F32) => cx.type_f32(), + layout::Float(FloatTy::F64) => cx.type_f64(), layout::Pointer => { // If we know the alignment, pick something better than i8. 
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - cx.pointee_for_abi_align( pointee.align) + cx.type_pointee_for_abi_align( pointee.align) } else { - cx.i8() + cx.type_i8() }; - cx.ptr_to(pointee) + cx.type_ptr_to(pointee) } } } @@ -371,7 +371,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. if immediate && scalar.is_bool() { - return cx.i1(); + return cx.type_i1(); } let offset = if index == 0 { From 2a513dff5956a5a41c58af5825c489bb5d6205c4 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 14:44:51 -0700 Subject: [PATCH 35/76] Removing LLVM content from CommonMethods -> ConstMethods --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/back/write.rs | 29 +++-------------- src/librustc_codegen_llvm/base.rs | 8 ++--- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_llvm/callee.rs | 2 +- src/librustc_codegen_llvm/common.rs | 32 ++++--------------- src/librustc_codegen_llvm/consts.rs | 2 +- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- src/librustc_codegen_llvm/glue.rs | 2 +- .../interfaces/{common.rs => consts.rs} | 13 +------- src/librustc_codegen_llvm/interfaces/mod.rs | 4 +-- src/librustc_codegen_llvm/interfaces/type_.rs | 2 ++ src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/lib.rs | 30 ----------------- src/librustc_codegen_llvm/meth.rs | 2 +- src/librustc_codegen_llvm/mir/block.rs | 2 +- src/librustc_codegen_llvm/mir/constant.rs | 2 +- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- src/librustc_codegen_llvm/type_.rs | 20 ++++++++---- 23 files changed, 49 insertions(+), 119 deletions(-) rename src/librustc_codegen_llvm/interfaces/{common.rs => 
consts.rs} (83%) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 2ccd76228afba..6f0766255afb4 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -19,7 +19,7 @@ use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 77ce46594418a..558708a4fb9e5 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -15,7 +15,7 @@ use builder::Builder; use value::Value; use rustc::hir; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use mir::place::PlaceRef; use mir::operand::OperandValue; diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 75218a8de52ee..2954d7ca5fd2f 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -49,7 +49,6 @@ use context::{is_pie_binary, get_reloc_model}; use common; use jobserver::{Client, Acquired}; use rustc_demangle; -use value::Value; use std::marker::PhantomData; use std::any::Any; @@ -424,24 +423,6 @@ impl CodegenContext<'ll> { } } - -impl CodegenContext<'ll> { - fn val_ty(&self, v: &'ll Value) -> &'ll Type { - common::val_ty(v) - } - - fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - common::const_bytes_in_context(llcx, bytes) - } - - pub fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - unsafe { - llvm::LLVMPointerType(ty, 0) - } - } -} - - pub struct DiagnosticHandlers<'a> { data: *mut (&'a CodegenContext<'a>, &'a Handler), llcx: &'a llvm::Context, @@ -912,10 +893,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, 
llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = cgcx.const_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - cgcx.val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -932,10 +913,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = cgcx.const_bytes_in_context(llcx, &[]); + let llconst = common::bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - cgcx.val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -2566,7 +2547,7 @@ fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::M "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(cgcx, llcx); + let i8p_ty = Type::i8p_llcx(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage && diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 493634d92c5ce..c1662df719e4f 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -74,7 +74,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use std::any::Any; use std::ffi::CString; @@ -653,12 +653,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = llvm_module.const_bytes_in_context(metadata_llcx, &compressed); - 
let llconst = llvm_module.const_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = common::bytes_in_context(metadata_llcx, &compressed); + let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, llvm_module.val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 163b6091359aa..b335c30e0e199 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods, TypeMethods}; +use interfaces::{BuilderMethods, Backend, ConstMethods, TypeMethods}; use syntax; use std::borrow::Cow; diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 8d60db2bb1e9a..58d67149b5ccd 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -22,7 +22,7 @@ use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::CommonWriteMethods; +use interfaces::TypeMethods; use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 6b87e5332f2fe..27f45d0db2ada 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -24,7 +24,7 @@ use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{Backend, CommonMethods, CommonWriteMethods, TypeMethods}; +use 
interfaces::{Backend, ConstMethods, TypeMethods}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -203,7 +203,7 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { type Context = &'ll llvm::Context; } -impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { +impl<'ll, 'tcx : 'll> ConstMethods for CodegenCx<'ll, 'tcx, &'ll Value> { // LLVM constant constructors. fn const_null(&self, t: &'ll Type) -> &'ll Value { @@ -321,7 +321,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { elts: &[&'ll Value], packed: bool ) -> &'ll Value { - &self.const_struct_in_context(&self.llcx, elts, packed) + struct_in_context(&self.llcx, elts, packed) } fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { @@ -337,7 +337,7 @@ impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { - &self.const_bytes_in_context(&self.llcx, bytes) + bytes_in_context(&self.llcx, bytes) } fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { @@ -408,14 +408,14 @@ pub fn val_ty(v: &'ll Value) -> &'ll Type { } } -pub fn const_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { +pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { let ptr = bytes.as_ptr() as *const c_char; return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); } } -pub fn const_struct_in_context( +pub fn struct_in_context( llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool, @@ -427,26 +427,6 @@ pub fn const_struct_in_context( } } -impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - fn val_ty(&self, v: &'ll Value) -> &'ll Type { - val_ty(v) - } - - fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - const_bytes_in_context(llcx, bytes) - } - - fn const_struct_in_context( - &self, - llcx: &'a 
llvm::Context, - elts: &[&'a Value], - packed: bool, - ) -> &'a Value { - const_struct_in_context(llcx, elts, packed) - } -} - - #[inline] fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { ((hi as u128) << 64) | (lo as u128) diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 4add74ac3b4ea..d806ddd1fcddd 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -24,7 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::{CommonWriteMethods, TypeMethods}; +use interfaces::TypeMethods; use rustc::ty::layout::{Align, LayoutOf}; diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index a0f440fea9d90..41a4962fcc310 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -17,7 +17,7 @@ use builder::Builder; use declare; use rustc::session::config::DebugInfo; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use syntax::attr; diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 5d14857cb3f6c..d15a182a313b2 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -20,7 +20,7 @@ use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, ConstMethods}; pub fn size_and_align_of_dst( bx: &Builder<'_, 'll, 'tcx, &'ll Value>, diff --git a/src/librustc_codegen_llvm/interfaces/common.rs b/src/librustc_codegen_llvm/interfaces/consts.rs similarity index 83% rename from src/librustc_codegen_llvm/interfaces/common.rs rename to src/librustc_codegen_llvm/interfaces/consts.rs index 216a5d9665fc3..62ea583acbc38 100644 --- a/src/librustc_codegen_llvm/interfaces/common.rs +++ 
b/src/librustc_codegen_llvm/interfaces/consts.rs @@ -11,7 +11,7 @@ use super::Backend; use syntax::symbol::LocalInternedString; -pub trait CommonMethods : Backend + CommonWriteMethods { +pub trait ConstMethods : Backend { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; fn const_undef(&self, t: Self::Type) -> Self::Value; @@ -52,14 +52,3 @@ pub trait CommonMethods : Backend + CommonWriteMethods { fn is_const_integral(&self, v: Self::Value) -> bool; fn is_const_real(&self, v: Self::Value) -> bool; } - -pub trait CommonWriteMethods : Backend { - fn val_ty(&self, v: Self::Value) -> Self::Type; - fn const_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value; - fn const_struct_in_context( - &self, - llcx: Self::Context, - elts: &[Self::Value], - packed: bool, - ) -> Self::Value; -} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 3e9c7eb881d18..93c46aed4acbe 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -10,10 +10,10 @@ mod builder; mod backend; -mod common; +mod consts; mod type_; pub use self::builder::BuilderMethods; pub use self::backend::Backend; -pub use self::common::{CommonMethods, CommonWriteMethods}; +pub use self::consts::ConstMethods; pub use self::type_::TypeMethods; diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 31022140519b7..93d14633c3f85 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -38,4 +38,6 @@ pub trait TypeMethods : Backend { fn func_params_types(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; fn int_width(&self, ty: Self::Type) -> u64; + + fn val_ty(&self, v: Self::Value) -> Self::Type; } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 34edd92a2e052..fb15d126df12d 
100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -31,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use rustc::session::Session; use syntax_pos::Span; diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 4655a49234947..04344ce211a6b 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -68,9 +68,6 @@ extern crate tempfile; extern crate memmap; use back::bytecode::RLIB_BYTECODE_EXTENSION; -use interfaces::{Backend, CommonWriteMethods}; -use value::Value; -use type_::Type; pub use llvm_util::target_features; use std::any::Any; @@ -346,14 +343,6 @@ struct ModuleLlvm<'ll> { phantom: PhantomData<&'ll ()> } -impl<'ll> Backend for ModuleLlvm<'ll> { - type Value = &'ll Value; - type BasicBlock = &'ll llvm::BasicBlock; - type Type = &'ll Type; - type TypeKind = llvm::TypeKind; - type Context = &'ll llvm::Context; -} - unsafe impl Send for ModuleLlvm<'ll> { } unsafe impl Sync for ModuleLlvm<'ll> { } @@ -379,25 +368,6 @@ impl ModuleLlvm<'ll> { } } -impl CommonWriteMethods for ModuleLlvm<'ll> { - fn val_ty(&self, v: &'ll Value) -> &'ll Type { - common::val_ty(v) - } - - fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - common::const_bytes_in_context(llcx, bytes) - } - - fn const_struct_in_context( - &self, - llcx: &'a llvm::Context, - elts: &[&'a Value], - packed: bool, - ) -> &'a Value { - common::const_struct_in_context(llcx, elts, packed) - } -} - impl Drop for ModuleLlvm<'ll> { fn drop(&mut self) { unsafe { diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index ad53e0c7afd68..83100447a5a4c 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -16,7 +16,7 @@ use consts; use monomorphize; use 
value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 7b6abbcb107bd..3a008494f8ead 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -26,7 +26,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index ef06eee18b9cc..2d775d3124158 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -26,7 +26,7 @@ use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use super::super::callee; use super::FunctionCx; diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 507ebf9e77d3a..ce6d3f9e0daa1 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -23,7 +23,7 @@ use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebug use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; use value::Value; -use interfaces::{BuilderMethods, CommonMethods}; +use interfaces::{BuilderMethods, ConstMethods}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 20a1136efd263..78d2ba5a5f01c 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ 
b/src/librustc_codegen_llvm/mir/operand.rs @@ -21,7 +21,7 @@ use value::Value; use type_of::LayoutLlvmExt; use glue; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use std::fmt; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 994df238f0551..6be273d8866ed 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -22,7 +22,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::{BuilderMethods, CommonMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index f302c0b6fa5ad..be4d645080302 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -26,7 +26,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 33d99148bfd2d..efcb617ebe4d3 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -22,7 +22,7 @@ use interfaces::TypeMethods; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use rustc_data_structures::small_c_str::SmallCStr; -use back::write; +use common; use std::fmt; @@ -187,9 +187,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - unsafe { - llvm::LLVMPointerType(ty, 0) - } + ty.ptr_to() } fn element_type(&self, ty: &'ll Type) -> &'ll Type { @@ -231,6 +229,10 @@ impl 
TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { llvm::LLVMGetIntTypeWidth(ty) as u64 } } + + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + common::val_ty(v) + } } impl Type { @@ -250,8 +252,14 @@ impl Type { } } - pub fn i8p_llcx(cx : &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type { - cx.type_ptr_to(Type::i8_llcx(llcx)) + pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type { + Type::i8_llcx(llcx).ptr_to() + } + + pub fn ptr_to(&self) -> &Type { + unsafe { + llvm::LLVMPointerType(&self, 0) + } } } From 79ea68f3803a895d66815cbf9d38dfec6566d744 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 16:04:20 -0700 Subject: [PATCH 36/76] Removed phantomdata no longer necessary Because CodegenContext doesn't implement Backend anymore --- src/librustc_codegen_llvm/back/lto.rs | 2 -- src/librustc_codegen_llvm/back/write.rs | 17 ++++++----------- src/librustc_codegen_llvm/lib.rs | 15 ++++++--------- 3 files changed, 12 insertions(+), 22 deletions(-) diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 5392187a1b491..61856236a1491 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -26,7 +26,6 @@ use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; use {ModuleCodegen, ModuleLlvm, ModuleKind}; -use std::marker::PhantomData; use libc; @@ -764,7 +763,6 @@ impl ThinModule { llmod_raw, llcx, tm, - phantom: PhantomData }, name: self.name().to_string(), kind: ModuleKind::Regular, diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 2954d7ca5fd2f..741e0840be167 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -49,7 +49,6 @@ use context::{is_pie_binary, get_reloc_model}; use common; use jobserver::{Client, Acquired}; use rustc_demangle; -use std::marker::PhantomData; use std::any::Any; use 
std::ffi::{CString, CStr}; @@ -348,7 +347,7 @@ struct AssemblerCommand { /// Additional resources used by optimize_and_codegen (not module specific) #[derive(Clone)] -pub struct CodegenContext<'ll> { +pub struct CodegenContext { // Resources needed when running LTO pub time_passes: bool, pub lto: Lto, @@ -389,13 +388,10 @@ pub struct CodegenContext<'ll> { // measuring is disabled. time_graph: Option, // The assembler command if no_integrated_as option is enabled, None otherwise - assembler_cmd: Option>, - // This field is used to give a lifetime parameter to the struct so that it can implement - // the Backend trait. - phantom: PhantomData<&'ll ()> + assembler_cmd: Option> } -impl CodegenContext<'ll> { +impl CodegenContext { pub fn create_diag_handler(&self) -> Handler { Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) } @@ -424,12 +420,12 @@ impl CodegenContext<'ll> { } pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext<'a>, &'a Handler), + data: *mut (&'a CodegenContext, &'a Handler), llcx: &'a llvm::Context, } impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext<'a>, + pub fn new(cgcx: &'a CodegenContext, handler: &'a Handler, llcx: &'a llvm::Context) -> Self { let data = Box::into_raw(Box::new((cgcx, handler))); @@ -1630,7 +1626,6 @@ fn start_executing_work(tcx: TyCtxt, target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), debuginfo: tcx.sess.opts.debuginfo, assembler_cmd, - phantom: PhantomData }; // This is the "main loop" of parallel work happening for parallel codegen. 
@@ -2099,7 +2094,7 @@ pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); -fn spawn_work(cgcx: CodegenContext<'static>, work: WorkItem) { +fn spawn_work(cgcx: CodegenContext, work: WorkItem) { let depth = time_depth(); thread::spawn(move || { diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 04344ce211a6b..dce11ec2faa55 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -73,7 +73,6 @@ pub use llvm_util::target_features; use std::any::Any; use std::path::{PathBuf}; use std::sync::mpsc; -use std::marker::PhantomData; use rustc_data_structures::sync::Lrc; use rustc::dep_graph::DepGraph; @@ -278,7 +277,7 @@ struct ModuleCodegen { /// as the crate name and disambiguator. /// We currently generate these names via CodegenUnit::build_cgu_name(). name: String, - module_llvm: ModuleLlvm<'static>, + module_llvm: ModuleLlvm, kind: ModuleKind, } @@ -336,17 +335,16 @@ struct CompiledModule { bytecode_compressed: Option, } -struct ModuleLlvm<'ll> { +struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, - phantom: PhantomData<&'ll ()> } -unsafe impl Send for ModuleLlvm<'ll> { } -unsafe impl Sync for ModuleLlvm<'ll> { } +unsafe impl Send for ModuleLlvm { } +unsafe impl Sync for ModuleLlvm { } -impl ModuleLlvm<'ll> { +impl ModuleLlvm { fn new(sess: &Session, mod_name: &str) -> Self { unsafe { let llcx = llvm::LLVMRustContextCreate(sess.fewer_names()); @@ -356,7 +354,6 @@ impl ModuleLlvm<'ll> { llmod_raw, llcx, tm: create_target_machine(sess, false), - phantom: PhantomData } } } @@ -368,7 +365,7 @@ impl ModuleLlvm<'ll> { } } -impl Drop for ModuleLlvm<'ll> { +impl Drop for ModuleLlvm { fn drop(&mut self) { unsafe { llvm::LLVMContextDispose(&mut *(self.llcx as *mut _)); From 
18216fc8056a114f42a26523cef8229e75e75b37 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 17:27:04 -0700 Subject: [PATCH 37/76] Fixed typos --- src/librustc_codegen_llvm/back/link.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 8993937ebb2b5..86c6a5e65b0e9 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -748,7 +748,7 @@ fn link_natively(sess: &Session, // with some thread pool working in the background. It seems that no one // currently knows a fix for this so in the meantime we're left with this... info!("{:?}", &cmd); - let retry_on_segfault = env::var("RUSTc_RETRY_LINKER_ON_SEGFAULT").is_ok(); + let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok(); let mut prog; let mut i = 0; loop { diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 93d14633c3f85..69b5e6a93c17c 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -19,7 +19,7 @@ pub trait TypeMethods : Backend { fn type_i32(&self) -> Self::Type; fn type_i64(&self) -> Self::Type; fn type_i128(&self) -> Self::Type; - fn type_ix(&self, num_bites: u64) -> Self::Type; + fn type_ix(&self, num_bits: u64) -> Self::Type; fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; fn type_x86_mmx(&self) -> Self::Type; From 626d3cbde196815272fa4a57e3d6548a9fb782b6 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 6 Sep 2018 18:31:42 -0700 Subject: [PATCH 38/76] Attempt at including CodegenCx within Builder with Associated types --- src/librustc_codegen_llvm/base.rs | 10 +- src/librustc_codegen_llvm/builder.rs | 50 +-- .../interfaces/builder.rs | 334 +++++++++--------- 3 files changed, 200 insertions(+), 194 deletions(-) diff --git 
a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index c1662df719e4f..c111e20b522b2 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -74,7 +74,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods, Backend}; use std::any::Any; use std::ffi::CString; @@ -160,12 +160,12 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, - lhs: Builder::Value, - rhs: Builder::Value, + lhs: ::Value, + rhs: ::Value, t: Ty<'tcx>, - ret_ty: Builder::Type, + ret_ty: ::Type, op: hir::BinOpKind -) -> Builder::Value { +) -> ::Value { let signed = match t.sty { ty::Float(_) => { let cmp = bin_op_to_fcmp_predicate(op); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index b335c30e0e199..b940d4c8bbdfb 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -12,7 +12,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; use common::{self, *}; use context::CodegenCx; -use type_; +use type_::Type; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; @@ -59,7 +59,7 @@ bitflags! 
{ impl Backend for Builder<'a, 'll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; - type Type = &'ll type_::Type; + type Type = &'ll Type; type TypeKind = llvm::TypeKind; type Context = &'ll llvm::Context; } @@ -67,6 +67,8 @@ impl Backend for Builder<'a, 'll, 'tcx, &'ll Value> { impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + type CodegenCx = CodegenCx<'ll, 'tcx, &'ll Value>; + fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, llfn: &'ll Value, @@ -455,7 +457,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { let bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -463,7 +465,7 @@ impl BuilderMethods<'a, 'll, 'tcx> bx.dynamic_alloca(ty, name, align) } - fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -479,7 +481,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn array_alloca(&self, - ty: Self::Type, + ty: &'ll Type, len: &'ll Value, name: &str, align: Align) -> &'ll Value { @@ -641,77 +643,77 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Casts */ - fn trunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn sext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - fn fptoui(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn fptoui(&self, val: 
&'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - fn fptosi(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - fn uitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn sitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn fptrunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn fpext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - fn ptrtoint(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - fn inttoptr(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - fn bitcast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn bitcast(&self, val: &'ll 
Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) @@ -719,14 +721,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } - fn intcast(&self, val: &'ll Value, dest_ty: Self::Type, is_signed: bool) -> &'ll Value { + fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { self.count_insn("intcast"); unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - fn pointercast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("pointercast"); unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) @@ -750,14 +752,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Miscellaneous instructions */ - fn empty_phi(&self, ty: Self::Type) -> &'ll Value { + fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - fn phi(&self, ty: Self::Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -770,7 +772,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: Self::Type, + inputs: &[&'ll Value], output: &'ll Type, volatile: bool, alignstack: bool, dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); @@ -829,7 +831,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } #[allow(dead_code)] - fn va_arg(&self, list: &'ll Value, ty: Self::Type) -> &'ll Value { + fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) @@ -995,7 +997,7 @@ impl 
BuilderMethods<'a, 'll, 'tcx> } } - fn landing_pad(&self, ty: Self::Type, pers_fn: &'ll Value, + fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -1299,7 +1301,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn zext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value { + fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("zext"); unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index a0f6f749d27bd..39ac03a6a4f70 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -15,6 +15,8 @@ use rustc::ty::layout::{Align, Size}; use rustc::session::Session; use builder::MemFlags; use super::backend::Backend; +use super::type_::TypeMethods; +use super::consts::ConstMethods; use std::borrow::Cow; use std::ops::Range; @@ -22,254 +24,256 @@ use syntax::ast::AsmDialect; -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : Backend { +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { + + type CodegenCx : Backend + TypeMethods + ConstMethods; fn new_block<'b>( - cx: &'a CodegenCx<'ll, 'tcx, Self::Value>, - llfn: Self::Value, + cx: &'a Self::CodegenCx, + llfn: ::Value, name: &'b str ) -> Self; - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, Self::Value>) -> Self; + fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; fn sess(&self) -> &Session; - fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, Self::Value>; + fn cx(&self) -> &'a Self::CodegenCx; fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn llfn(&self) -> Self::Value; - fn llbb(&self) -> Self::BasicBlock; + fn llfn(&self) -> ::Value; + fn llbb(&self) -> ::BasicBlock; fn count_insn(&self, category: &str); - fn set_value_name(&self, value: Self::Value, name: &str); - fn 
position_at_end(&self, llbb: Self::BasicBlock); - fn position_at_start(&self, llbb: Self::BasicBlock); + fn set_value_name(&self, value: ::Value, name: &str); + fn position_at_end(&self, llbb: ::BasicBlock); + fn position_at_start(&self, llbb: ::BasicBlock); fn ret_void(&self); - fn ret(&self, v: Self::Value); - fn br(&self, dest: Self::BasicBlock); + fn ret(&self, v: ::Value); + fn br(&self, dest: ::BasicBlock); fn cond_br( &self, - cond: Self::Value, - then_llbb: Self::BasicBlock, - else_llbb: Self::BasicBlock, + cond: ::Value, + then_llbb: ::BasicBlock, + else_llbb: ::BasicBlock, ); fn switch( &self, - v: Self::Value, - else_llbb: Self::BasicBlock, + v: ::Value, + else_llbb: ::BasicBlock, num_cases: usize, - ) -> Self::Value; + ) -> ::Value; fn invoke( &self, - llfn: Self::Value, - args: &[Self::Value], - then: Self::BasicBlock, - catch: Self::BasicBlock, - bundle: Option<&OperandBundleDef<'ll, Self::Value>> - ) -> Self::Value; + llfn: ::Value, + args: &[::Value], + then: ::BasicBlock, + catch: ::BasicBlock, + bundle: Option<&OperandBundleDef<'ll, ::Value>> + ) -> ::Value; fn unreachable(&self); - fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn exactsdiv(&self, lhs: 
Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn neg(&self, v: Self::Value) -> Self::Value; - fn fneg(&self, v: Self::Value) -> Self::Value; - fn not(&self, v: Self::Value) -> Self::Value; + fn add(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fadd(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fadd_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn sub(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fsub(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fsub_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn mul(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fmul(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fmul_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn udiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn exactudiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn sdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn exactsdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fdiv_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn urem(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn srem(&self, lhs: ::Value, rhs: 
::Value) -> ::Value; + fn frem(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn frem_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn shl(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn lshr(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn ashr(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn and(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn or(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn xor(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn neg(&self, v: ::Value) -> ::Value; + fn fneg(&self, v: ::Value) -> ::Value; + fn not(&self, v: ::Value) -> ::Value; - fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value; - fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn alloca(&self, ty: ::Type, name: &str, align: Align) -> ::Value; + fn dynamic_alloca(&self, ty: ::Type, name: &str, align: Align) -> ::Value; fn array_alloca( &self, - ty: Self::Type, - len: Self::Value, + ty: ::Type, + len: ::Value, name: &str, align: Align - ) -> Self::Value; + ) -> ::Value; - fn load(&self, ptr: Self::Value, align: Align) -> Self::Value; - fn volatile_load(&self, ptr: Self::Value) -> Self::Value; - fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, align: Align) -> Self::Value; + fn load(&self, ptr: ::Value, align: Align) -> ::Value; + fn volatile_load(&self, ptr: ::Value) -> ::Value; + fn atomic_load(&self, ptr: ::Value, order: AtomicOrdering, align: Align) -> ::Value; - fn range_metadata(&self, load: Self::Value, range: Range); - fn nonnull_metadata(&self, load: Self::Value); + fn range_metadata(&self, load: ::Value, range: Range); + fn nonnull_metadata(&self, load: ::Value); - fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; + fn store(&self, val: ::Value, ptr: ::Value, align: Align) -> ::Value; fn atomic_store( &self, - val: Self::Value, - ptr: Self::Value, + val: ::Value, + ptr: ::Value, order: AtomicOrdering, align: Align ); fn store_with_flags( 
&self, - val: Self::Value, - ptr: Self::Value, + val: ::Value, + ptr: ::Value, align: Align, flags: MemFlags, - ) -> Self::Value; + ) -> ::Value; - fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; - fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; - fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value; + fn gep(&self, ptr: ::Value, indices: &[::Value]) -> ::Value; + fn inbounds_gep(&self, ptr: ::Value, indices: &[::Value]) -> ::Value; + fn struct_gep(&self, ptr: ::Value, idx: u64) -> ::Value; - fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; - fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; - fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn trunc(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn sext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn fptoui(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn fptosi(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn uitofp(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn sitofp(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn fptrunc(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn 
fpext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn ptrtoint(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn inttoptr(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn bitcast(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn intcast(&self, val: ::Value, dest_ty: ::Type, is_signed: bool) -> ::Value; + fn pointercast(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn icmp(&self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn fcmp(&self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn icmp(&self, op: IntPredicate, lhs: ::Value, rhs: ::Value) -> ::Value; + fn fcmp(&self, op: RealPredicate, lhs: ::Value, rhs: ::Value) -> ::Value; - fn empty_phi(&self, ty: Self::Type) -> Self::Value; - fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value; + fn empty_phi(&self, ty: ::Type) -> ::Value; + fn phi(&self, ty: ::Type, vals: &[::Value], bbs: &[::BasicBlock]) -> ::Value; fn inline_asm_call( &self, asm: *const c_char, cons: *const c_char, - inputs: &[Self::Value], - output: Self::Type, + inputs: &[::Value], + output: ::Type, volatile: bool, alignstack: bool, dia: AsmDialect - ) -> Self::Value; + ) -> ::Value; - fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn minnum(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn maxnum(&self, lhs: ::Value, rhs: ::Value) -> ::Value; fn select( - &self, cond: Self::Value, - then_val: Self::Value, - else_val: Self::Value, - ) -> Self::Value; + &self, cond: ::Value, + then_val: ::Value, + else_val: ::Value, + ) -> ::Value; - fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value; - fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value; + fn va_arg(&self, list: ::Value, ty: ::Type) -> ::Value; + fn extract_element(&self, vec: ::Value, idx: ::Value) -> ::Value; fn insert_element( - &self, vec: 
Self::Value, - elt: Self::Value, - idx: Self::Value, - ) -> Self::Value; - fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value; - fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value; - fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_add(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_and(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_or(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value; - fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value; - fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value; - fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value; + &self, vec: ::Value, + elt: ::Value, + idx: ::Value, + ) -> ::Value; + fn shuffle_vector(&self, v1: ::Value, v2: ::Value, mask: ::Value) -> ::Value; + fn vector_splat(&self, num_elts: usize, elt: ::Value) -> ::Value; + fn vector_reduce_fadd_fast(&self, acc: ::Value, src: ::Value) -> ::Value; + fn vector_reduce_fmul_fast(&self, acc: ::Value, src: ::Value) -> ::Value; + fn vector_reduce_add(&self, src: ::Value) -> ::Value; + fn vector_reduce_mul(&self, src: ::Value) -> ::Value; + fn vector_reduce_and(&self, src: ::Value) -> ::Value; + fn vector_reduce_or(&self, src: ::Value) -> ::Value; + fn vector_reduce_xor(&self, src: ::Value) -> ::Value; + fn vector_reduce_fmin(&self, src: ::Value) -> ::Value; + fn vector_reduce_fmax(&self, src: ::Value) 
-> ::Value; + fn vector_reduce_fmin_fast(&self, src: ::Value) -> ::Value; + fn vector_reduce_fmax_fast(&self, src: ::Value) -> ::Value; + fn vector_reduce_min(&self, src: ::Value, is_signed: bool) -> ::Value; + fn vector_reduce_max(&self, src: ::Value, is_signed: bool) -> ::Value; + fn extract_value(&self, agg_val: ::Value, idx: u64) -> ::Value; fn insert_value( &self, - agg_val: Self::Value, - elt: Self::Value, + agg_val: ::Value, + elt: ::Value, idx: u64 - ) -> Self::Value; + ) -> ::Value; fn landing_pad( &self, - ty: Self::Type, - pers_fn: Self::Value, + ty: ::Type, + pers_fn: ::Value, num_clauses: usize - ) -> Self::Value; - fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value); - fn set_cleanup(&self, landing_pad: Self::Value); - fn resume(&self, exn: Self::Value) -> Self::Value; + ) -> ::Value; + fn add_clause(&self, landing_pad: ::Value, clause: ::Value); + fn set_cleanup(&self, landing_pad: ::Value); + fn resume(&self, exn: ::Value) -> ::Value; fn cleanup_pad( &self, - parent: Option, - args: &[Self::Value] - ) -> Self::Value; + parent: Option<::Value>, + args: &[::Value] + ) -> ::Value; fn cleanup_ret( - &self, cleanup: Self::Value, - unwind: Option, - ) -> Self::Value; + &self, cleanup: ::Value, + unwind: Option<::BasicBlock>, + ) -> ::Value; fn catch_pad( &self, - parent: Self::Value, - args: &[Self::Value] - ) -> Self::Value; - fn catch_ret(&self, pad: Self::Value, unwind: Self::BasicBlock) -> Self::Value; + parent: ::Value, + args: &[::Value] + ) -> ::Value; + fn catch_ret(&self, pad: ::Value, unwind: ::BasicBlock) -> ::Value; fn catch_switch( &self, - parent: Option, - unwind: Option, + parent: Option<::Value>, + unwind: Option<::BasicBlock>, num_handlers: usize, - ) -> Self::Value; - fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock); - fn set_personality_fn(&self, personality: Self::Value); + ) -> ::Value; + fn add_handler(&self, catch_switch: ::Value, handler: ::BasicBlock); + fn set_personality_fn(&self, 
personality: ::Value); fn atomic_cmpxchg( &self, - dst: Self::Value, - cmp: Self::Value, - src: Self::Value, + dst: ::Value, + cmp: ::Value, + src: ::Value, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool, - ) -> Self::Value; + ) -> ::Value; fn atomic_rmw( &self, op: AtomicRmwBinOp, - dst: Self::Value, - src: Self::Value, + dst: ::Value, + src: ::Value, order: AtomicOrdering, - ) -> Self::Value; + ) -> ::Value; fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); - fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); - fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); - fn set_invariant_load(&self, load: Self::Value); + fn add_case(&self, s: ::Value, on_val: ::Value, dest: ::BasicBlock); + fn add_incoming_to_phi(&self, phi: ::Value, val: ::Value, bb: ::BasicBlock); + fn set_invariant_load(&self, load: ::Value); fn check_store( &self, - val: Self::Value, - ptr: Self::Value - ) -> Self::Value; + val: ::Value, + ptr: ::Value + ) -> ::Value; fn check_call<'b>( &self, typ: &str, - llfn: Self::Value, - args: &'b [Self::Value] - ) -> Cow<'b, [Self::Value]> where [Self::Value] : ToOwned; - fn lifetime_start(&self, ptr: Self::Value, size: Size); - fn lifetime_end(&self, ptr: Self::Value, size: Size); + llfn: ::Value, + args: &'b [::Value] + ) -> Cow<'b, [::Value]> where [::Value] : ToOwned; + fn lifetime_start(&self, ptr: ::Value, size: Size); + fn lifetime_end(&self, ptr: ::Value, size: Size); - fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size); + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ::Value, size: Size); - fn call(&self, llfn: Self::Value, args: &[Self::Value], - bundle: Option<&OperandBundleDef<'ll, Self::Value>>) -> Self::Value; - fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn call(&self, llfn: ::Value, args: &[::Value], + bundle: Option<&OperandBundleDef<'ll, ::Value>>) -> ::Value; + fn 
zext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; } From 1ef9f8f403a227e534d2368d1f064c3631729a9d Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 7 Sep 2018 13:25:50 -0700 Subject: [PATCH 39/76] Generalized some base.rs methods --- src/librustc_codegen_llvm/base.rs | 63 ++- src/librustc_codegen_llvm/builder.rs | 16 +- src/librustc_codegen_llvm/common.rs | 25 +- .../interfaces/backend.rs | 5 +- .../interfaces/builder.rs | 479 +++++++++++++++--- src/librustc_codegen_llvm/interfaces/type_.rs | 3 +- src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 24 + src/librustc_codegen_llvm/type_.rs | 8 +- 9 files changed, 490 insertions(+), 135 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index c111e20b522b2..22ed0f8ed5be4 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -30,7 +30,7 @@ use super::CachedModuleCodegen; use abi; use back::write::{self, OngoingCodegen}; -use llvm::{self, TypeKind, get_param}; +use llvm::{self, get_param}; use metadata; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; @@ -55,7 +55,7 @@ use builder::{Builder, MemFlags}; use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, IntPredicate, RealPredicate}; +use common::{self, IntPredicate, RealPredicate, TypeKind}; use consts; use context::CodegenCx; use debuginfo; @@ -67,7 +67,6 @@ use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, Codege use rustc_codegen_utils::symbol_names_test; use time_graph; use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; -use type_::Type; use type_of::LayoutLlvmExt; use rustc::util::nodemap::{FxHashMap, DefIdSet}; use CrateInfo; @@ -334,21 +333,31 @@ pub fn coerce_unsized_into( } } -pub fn cast_shift_expr_rhs( - bx: &Builder<'_, 'll, '_, &'ll Value>, 
op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { +pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + op: hir::BinOpKind, + lhs: ::Value, + rhs: ::Value +) -> ::Value { cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) } -fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_, &'ll Value>, - op: hir::BinOpKind, - lhs: &'ll Value, - rhs: &'ll Value, - trunc: F, - zext: G) - -> &'ll Value - where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value, - G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value +fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + op: hir::BinOpKind, + lhs: ::Value, + rhs: ::Value, + trunc: F, + zext: G +) -> ::Value + where F: FnOnce( + ::Value, + ::Type + ) -> ::Value, + G: FnOnce( + ::Value, + ::Type + ) -> ::Value { // Shifts may have any size int on the rhs if op.is_shift() { @@ -390,10 +399,10 @@ pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { bx.call(assume_intrinsic, &[val], None); } -pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( - bx: &Builder<'_ ,'ll, '_, &'ll Value>, - val: &'ll Value -) -> &'ll Value { +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll ,'tcx>>( + bx: &Builder, + val: ::Value +) -> ::Value { if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { @@ -401,22 +410,22 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>( } } -pub fn to_immediate( - bx: &Builder<'_, 'll, '_, &'ll Value>, - val: &'ll Value, +pub fn to_immediate<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + val: ::Value, layout: layout::TyLayout, -) -> &'ll Value { +) -> ::Value { if let layout::Abi::Scalar(ref scalar) = layout.abi { return to_immediate_scalar(bx, val, scalar); } val } -pub fn to_immediate_scalar( - bx: &Builder<'_, 'll, '_, &'ll Value>, - val: &'ll Value, +pub fn 
to_immediate_scalar<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + val: ::Value, scalar: &layout::Scalar, -) -> &'ll Value { +) -> ::Value { if scalar.is_bool() { return bx.trunc(val, bx.cx().type_i1()); } diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index b940d4c8bbdfb..539b38dfcef3c 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, Backend, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; use syntax; use std::borrow::Cow; @@ -56,14 +56,6 @@ bitflags! { } } -impl Backend for Builder<'a, 'll, 'tcx, &'ll Value> { - type Value = &'ll Value; - type BasicBlock = &'ll BasicBlock; - type Type = &'ll Type; - type TypeKind = llvm::TypeKind; - type Context = &'ll llvm::Context; -} - impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { @@ -1180,7 +1172,7 @@ impl BuilderMethods<'a, 'll, 'tcx> let stored_ty = self.cx.val_ty(val); let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - assert_eq!(self.cx.type_kind(dest_ptr_ty), llvm::TypeKind::Pointer); + assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1199,11 +1191,11 @@ impl BuilderMethods<'a, 'll, 'tcx> args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers - while self.cx.type_kind(fn_ty) == llvm::TypeKind::Pointer { + while self.cx.type_kind(fn_ty) == TypeKind::Pointer { fn_ty = self.cx.element_type(fn_ty); } - assert!(self.cx.type_kind(fn_ty) == llvm::TypeKind::Function, + assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); let param_tys = self.cx.func_params_types(fn_ty); 
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 27f45d0db2ada..4630ecfbce571 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -12,8 +12,7 @@ //! Code that is useful in various codegen modules. -use llvm::{self, TypeKind}; -use llvm::{True, False, Bool, BasicBlock}; +use llvm::{self, True, False, Bool, BasicBlock}; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; use abi; @@ -133,6 +132,27 @@ pub enum SynchronizationScope { CrossThread, } +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TypeKind { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPc_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, +} + /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * @@ -199,7 +219,6 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll Type; - type TypeKind = llvm::TypeKind; type Context = &'ll llvm::Context; } diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index af3d98d1dd179..2b79e2522470c 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -11,9 +11,8 @@ use std::fmt::Debug; pub trait Backend { - type Value : Debug + PartialEq; + type Value : Debug + PartialEq + Copy; type BasicBlock; - type Type : Debug + PartialEq; - type TypeKind; + type Type : Debug + PartialEq + Copy; type Context; } diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 39ac03a6a4f70..7d9babf2e3767 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -23,10 +23,10 @@ use std::ops::Range; use syntax::ast::AsmDialect; - pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> 
{ - type CodegenCx : Backend + TypeMethods + ConstMethods; + + type CodegenCx : 'a + Backend + TypeMethods + ConstMethods; fn new_block<'b>( cx: &'a Self::CodegenCx, @@ -69,37 +69,145 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { bundle: Option<&OperandBundleDef<'ll, ::Value>> ) -> ::Value; fn unreachable(&self); - fn add(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fadd(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fadd_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn sub(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fsub(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fsub_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn mul(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fmul(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fmul_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn udiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn exactudiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn sdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn exactsdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fdiv(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fdiv_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn urem(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn srem(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn frem(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn frem_fast(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn shl(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn lshr(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn ashr(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn and(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn or(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn xor(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn add( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fadd( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fadd_fast( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn sub( + &self, 
+ lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fsub( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fsub_fast( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn mul( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fmul( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fmul_fast( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn udiv( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn exactudiv( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn sdiv( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn exactsdiv( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fdiv( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn fdiv_fast( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn urem( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn srem( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn frem( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn frem_fast( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn shl( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn lshr( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn ashr( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn and( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn or( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; + fn xor( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn neg(&self, v: ::Value) -> ::Value; fn fneg(&self, v: ::Value) -> ::Value; fn not(&self, v: ::Value) -> ::Value; - fn alloca(&self, ty: ::Type, name: &str, align: Align) -> ::Value; - fn dynamic_alloca(&self, ty: ::Type, name: &str, align: Align) -> ::Value; + fn alloca( + &self, + ty: ::Type, + name: &str, align: Align + ) -> ::Value; + fn dynamic_alloca( + &self, + ty: ::Type, + name: &str, align: Align + ) -> ::Value; fn array_alloca( &self, ty: ::Type, @@ -108,14 +216,30 @@ pub trait BuilderMethods<'a, 'll 
:'a, 'tcx: 'll> { align: Align ) -> ::Value; - fn load(&self, ptr: ::Value, align: Align) -> ::Value; - fn volatile_load(&self, ptr: ::Value) -> ::Value; - fn atomic_load(&self, ptr: ::Value, order: AtomicOrdering, align: Align) -> ::Value; + fn load( + &self, + ptr: ::Value, + align: Align + ) -> ::Value; + fn volatile_load( + &self, + ptr: ::Value + ) -> ::Value; + fn atomic_load( + &self, + ptr: ::Value, + order: AtomicOrdering, align: Align + ) -> ::Value; fn range_metadata(&self, load: ::Value, range: Range); fn nonnull_metadata(&self, load: ::Value); - fn store(&self, val: ::Value, ptr: ::Value, align: Align) -> ::Value; + fn store( + &self, + val: ::Value, + ptr: ::Value, + align: Align + ) -> ::Value; fn atomic_store( &self, val: ::Value, @@ -131,29 +255,108 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { flags: MemFlags, ) -> ::Value; - fn gep(&self, ptr: ::Value, indices: &[::Value]) -> ::Value; - fn inbounds_gep(&self, ptr: ::Value, indices: &[::Value]) -> ::Value; - fn struct_gep(&self, ptr: ::Value, idx: u64) -> ::Value; + fn gep( + &self, + ptr: ::Value, + indices: &[::Value] + ) -> ::Value; + fn inbounds_gep( + &self, + ptr: ::Value, + indices: &[::Value] + ) -> ::Value; + fn struct_gep( + &self, + ptr: ::Value, + idx: u64 + ) -> ::Value; - fn trunc(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn sext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn fptoui(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn fptosi(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn uitofp(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn sitofp(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn fptrunc(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn fpext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn ptrtoint(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn inttoptr(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn bitcast(&self, val: ::Value, dest_ty: ::Type) -> ::Value; - fn intcast(&self, val: ::Value, 
dest_ty: ::Type, is_signed: bool) -> ::Value; - fn pointercast(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn trunc( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn sext( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn fptoui( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn fptosi( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn uitofp( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn sitofp( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn fptrunc( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn fpext( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn ptrtoint( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn inttoptr( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn bitcast( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; + fn intcast( + &self, + val: ::Value, + dest_ty: ::Type, is_signed: bool + ) -> ::Value; + fn pointercast( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; - fn icmp(&self, op: IntPredicate, lhs: ::Value, rhs: ::Value) -> ::Value; - fn fcmp(&self, op: RealPredicate, lhs: ::Value, rhs: ::Value) -> ::Value; + fn icmp( + &self, + op: IntPredicate, + lhs: ::Value, rhs: ::Value + ) -> ::Value; + fn fcmp( + &self, + op: RealPredicate, + lhs: ::Value, rhs: ::Value + ) -> ::Value; - fn empty_phi(&self, ty: ::Type) -> ::Value; - fn phi(&self, ty: ::Type, vals: &[::Value], bbs: &[::BasicBlock]) -> ::Value; + fn empty_phi( + &self, + ty: ::Type) -> ::Value; + fn phi( + &self, + ty: ::Type, + vals: &[::Value], + bbs: &[::BasicBlock] + ) -> ::Value; fn inline_asm_call( &self, asm: *const c_char, @@ -165,37 +368,108 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { dia: AsmDialect ) -> ::Value; - fn minnum(&self, lhs: ::Value, rhs: ::Value) -> ::Value; - fn maxnum(&self, lhs: ::Value, rhs: ::Value) -> ::Value; + fn minnum( + &self, + lhs: ::Value, + rhs: ::Value + ) -> 
::Value; + fn maxnum( + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn select( &self, cond: ::Value, then_val: ::Value, else_val: ::Value, ) -> ::Value; - fn va_arg(&self, list: ::Value, ty: ::Type) -> ::Value; - fn extract_element(&self, vec: ::Value, idx: ::Value) -> ::Value; + fn va_arg( + &self, + list: ::Value, + ty: ::Type + ) -> ::Value; + fn extract_element(&self, + vec: ::Value, + idx: ::Value + ) -> ::Value; fn insert_element( &self, vec: ::Value, elt: ::Value, idx: ::Value, ) -> ::Value; - fn shuffle_vector(&self, v1: ::Value, v2: ::Value, mask: ::Value) -> ::Value; - fn vector_splat(&self, num_elts: usize, elt: ::Value) -> ::Value; - fn vector_reduce_fadd_fast(&self, acc: ::Value, src: ::Value) -> ::Value; - fn vector_reduce_fmul_fast(&self, acc: ::Value, src: ::Value) -> ::Value; - fn vector_reduce_add(&self, src: ::Value) -> ::Value; - fn vector_reduce_mul(&self, src: ::Value) -> ::Value; - fn vector_reduce_and(&self, src: ::Value) -> ::Value; - fn vector_reduce_or(&self, src: ::Value) -> ::Value; - fn vector_reduce_xor(&self, src: ::Value) -> ::Value; - fn vector_reduce_fmin(&self, src: ::Value) -> ::Value; - fn vector_reduce_fmax(&self, src: ::Value) -> ::Value; - fn vector_reduce_fmin_fast(&self, src: ::Value) -> ::Value; - fn vector_reduce_fmax_fast(&self, src: ::Value) -> ::Value; - fn vector_reduce_min(&self, src: ::Value, is_signed: bool) -> ::Value; - fn vector_reduce_max(&self, src: ::Value, is_signed: bool) -> ::Value; - fn extract_value(&self, agg_val: ::Value, idx: u64) -> ::Value; + fn shuffle_vector( + &self, + v1: ::Value, + v2: ::Value, + mask: ::Value + ) -> ::Value; + fn vector_splat( + &self, + num_elts: usize, + elt: ::Value + ) -> ::Value; + fn vector_reduce_fadd_fast( + &self, + acc: ::Value, + src: ::Value + ) -> ::Value; + fn vector_reduce_fmul_fast( + &self, + acc: ::Value, + src: ::Value + ) -> ::Value; + fn vector_reduce_add( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_mul( + &self, + src: ::Value + 
) -> ::Value; + fn vector_reduce_and( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_or( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_xor( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_fmin( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_fmax( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_fmin_fast( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_fmax_fast( + &self, + src: ::Value + ) -> ::Value; + fn vector_reduce_min( + &self, + src: ::Value, + is_signed: bool + ) -> ::Value; + fn vector_reduce_max( + &self, + src: ::Value, + is_signed: bool + ) -> ::Value; + fn extract_value( + &self, + agg_val: ::Value, + idx: u64 + ) -> ::Value; fn insert_value( &self, agg_val: ::Value, @@ -209,9 +483,19 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { pers_fn: ::Value, num_clauses: usize ) -> ::Value; - fn add_clause(&self, landing_pad: ::Value, clause: ::Value); - fn set_cleanup(&self, landing_pad: ::Value); - fn resume(&self, exn: ::Value) -> ::Value; + fn add_clause( + &self, + landing_pad: ::Value, + clause: ::Value + ); + fn set_cleanup( + &self, + landing_pad: ::Value + ); + fn resume( + &self, + exn: ::Value + ) -> ::Value; fn cleanup_pad( &self, parent: Option<::Value>, @@ -226,14 +510,22 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { parent: ::Value, args: &[::Value] ) -> ::Value; - fn catch_ret(&self, pad: ::Value, unwind: ::BasicBlock) -> ::Value; + fn catch_ret( + &self, + pad: ::Value, + unwind: ::BasicBlock + ) -> ::Value; fn catch_switch( &self, parent: Option<::Value>, unwind: Option<::BasicBlock>, num_handlers: usize, ) -> ::Value; - fn add_handler(&self, catch_switch: ::Value, handler: ::BasicBlock); + fn add_handler( + &self, + catch_switch: ::Value, + handler: ::BasicBlock + ); fn set_personality_fn(&self, personality: ::Value); fn atomic_cmpxchg( @@ -253,8 +545,18 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { order: AtomicOrdering, ) -> ::Value; fn 
atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); - fn add_case(&self, s: ::Value, on_val: ::Value, dest: ::BasicBlock); - fn add_incoming_to_phi(&self, phi: ::Value, val: ::Value, bb: ::BasicBlock); + fn add_case( + &self, + s: ::Value, + on_val: ::Value, + dest: ::BasicBlock + ); + fn add_incoming_to_phi( + &self, + phi: ::Value, + val: ::Value, + bb: ::BasicBlock + ); fn set_invariant_load(&self, load: ::Value); fn check_store( @@ -267,13 +569,22 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { typ: &str, llfn: ::Value, args: &'b [::Value] - ) -> Cow<'b, [::Value]> where [::Value] : ToOwned; + ) -> Cow<'b, [::Value]> + where [::Value] : ToOwned; fn lifetime_start(&self, ptr: ::Value, size: Size); fn lifetime_end(&self, ptr: ::Value, size: Size); fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ::Value, size: Size); - fn call(&self, llfn: ::Value, args: &[::Value], - bundle: Option<&OperandBundleDef<'ll, ::Value>>) -> ::Value; - fn zext(&self, val: ::Value, dest_ty: ::Type) -> ::Value; + fn call( + &self, + llfn: ::Value, + args: &[::Value], + bundle: Option<&OperandBundleDef<'ll, ::Value>> + ) -> ::Value; + fn zext( + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; } diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 69b5e6a93c17c..b24d00cfe5e01 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -9,6 +9,7 @@ // except according to those terms. 
use super::backend::Backend; +use common::TypeKind; pub trait TypeMethods : Backend { fn type_void(&self) -> Self::Type; @@ -30,7 +31,7 @@ pub trait TypeMethods : Backend { fn type_named_struct(&self, name: &str) -> Self::Type; fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; - fn type_kind(&self, ty: Self::Type) -> Self::TypeKind; + fn type_kind(&self, ty: Self::Type) -> TypeKind; fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index fb15d126df12d..a1bdb015e59b5 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -12,7 +12,7 @@ use attributes; use intrinsics::{self, Intrinsic}; -use llvm::{self, TypeKind}; +use llvm; use abi::{Abi, FnType, LlvmType, PassMode}; use mir::place::PlaceRef; use mir::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index fbf0f7473dd4b..87bbf3f7ab555 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -228,6 +228,30 @@ pub enum TypeKind { Token = 16, } +impl TypeKind { + pub fn to_generic(self) -> common::TypeKind { + match self { + TypeKind::Void => common::TypeKind::Void, + TypeKind::Half => common::TypeKind::Half, + TypeKind::Float => common::TypeKind::Float, + TypeKind::Double => common::TypeKind::Double, + TypeKind::X86_FP80 => common::TypeKind::X86_FP80, + TypeKind::FP128 => common::TypeKind::FP128, + TypeKind::PPc_FP128 => common::TypeKind::PPc_FP128, + TypeKind::Label => common::TypeKind::Label, + TypeKind::Integer => common::TypeKind::Integer, + TypeKind::Function => common::TypeKind::Function, + TypeKind::Struct => common::TypeKind::Struct, + TypeKind::Array 
=> common::TypeKind::Array, + TypeKind::Pointer => common::TypeKind::Pointer, + TypeKind::Vector => common::TypeKind::Vector, + TypeKind::Metadata => common::TypeKind::Metadata, + TypeKind::X86_MMX => common::TypeKind::X86_MMX, + TypeKind::Token => common::TypeKind::Token, + } + } +} + /// LLVMAtomicRmwBinOp #[derive(Copy, Clone)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index efcb617ebe4d3..ed287d7185335 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -13,16 +13,16 @@ pub use llvm::Type; use llvm; -use llvm::{Bool, False, True, TypeKind}; - +use llvm::{Bool, False, True}; use context::CodegenCx; use value::Value; use interfaces::TypeMethods; + use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use rustc_data_structures::small_c_str::SmallCStr; -use common; +use common::{self, TypeKind}; use std::fmt; @@ -175,7 +175,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { - llvm::LLVMRustGetTypeKind(ty) + llvm::LLVMRustGetTypeKind(ty).to_generic() } } From 5428c7ea2c892622e462c55eecfed4c8ad29afb0 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 7 Sep 2018 15:39:39 -0700 Subject: [PATCH 40/76] Generalized memset and memcpy --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/base.rs | 40 +- src/librustc_codegen_llvm/builder.rs | 2 +- src/librustc_codegen_llvm/callee.rs | 2 +- src/librustc_codegen_llvm/common.rs | 4 +- src/librustc_codegen_llvm/consts.rs | 2 +- src/librustc_codegen_llvm/context.rs | 624 +++++++++--------- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- .../interfaces/builder.rs | 3 +- .../interfaces/intrinsic.rs | 25 + src/librustc_codegen_llvm/interfaces/mod.rs | 4 +- src/librustc_codegen_llvm/interfaces/type_.rs | 33 +- src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/meth.rs | 2 +- 
src/librustc_codegen_llvm/mir/block.rs | 2 +- src/librustc_codegen_llvm/mir/constant.rs | 2 +- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- src/librustc_codegen_llvm/type_.rs | 30 +- src/librustc_codegen_llvm/type_of.rs | 2 +- 23 files changed, 430 insertions(+), 363 deletions(-) create mode 100644 src/librustc_codegen_llvm/interfaces/intrinsic.rs diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 6f0766255afb4..0125ab3a257a9 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -19,7 +19,7 @@ use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 558708a4fb9e5..cf45675b584ec 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -15,7 +15,7 @@ use builder::Builder; use value::Value; use rustc::hir; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods}; use mir::place::PlaceRef; use mir::operand::OperandValue; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 22ed0f8ed5be4..207011a5d6ac2 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -73,7 +73,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods, Backend}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, Backend, DerivedIntrinsicMethods}; use 
std::any::Any; use std::ffi::CString; @@ -432,11 +432,11 @@ pub fn to_immediate_scalar<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx val } -pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( - bx: &Builder<'_ ,'ll, '_, &'ll Value>, - dst: &'ll Value, - src: &'ll Value, - n_bytes: &'ll Value, +pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + dst: ::Value, + src: ::Value, + n_bytes: ::Value, align: Align, flags: MemFlags, ) { @@ -448,21 +448,21 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>( return; } let cx = bx.cx(); - let ptr_width = &cx.sess().target.target.target_pointer_width; + let ptr_width = &bx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = cx.get_intrinsic(&key); let src_ptr = bx.pointercast(src, cx.type_i8p()); let dst_ptr = bx.pointercast(dst, cx.type_i8p()); - let size = bx.intcast(n_bytes, cx.isize_ty, false); + let size = bx.intcast(n_bytes, cx.type_isize(), false); let align = cx.const_i32(align.abi() as i32); let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( - bx: &Builder<'_ ,'ll, '_, &'ll Value>, - dst: &'ll Value, - src: &'ll Value, +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + dst: ::Value, + src: ::Value, layout: TyLayout<'tcx>, align: Align, flags: MemFlags, @@ -475,15 +475,15 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>( call_memcpy(bx, dst, src, bx.cx().const_usize(size), align, flags); } -pub fn call_memset( - bx: &Builder<'_, 'll, '_, &'ll Value>, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: &'ll Value, +pub fn call_memset<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Builder, + ptr: ::Value, + fill_byte: ::Value, + size: ::Value, + align: ::Value, volatile: bool, -) -> &'ll Value { - 
let ptr_width = &bx.cx().sess().target.target.target_pointer_width; +) -> ::Value { + let ptr_width = &bx.sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key); let volatile = bx.cx().const_bool(volatile); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 539b38dfcef3c..51da73648746b 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; use syntax; use std::borrow::Cow; diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 58d67149b5ccd..e46b1055affdd 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -22,7 +22,7 @@ use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::TypeMethods; +use interfaces::BaseTypeMethods; use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 4630ecfbce571..76765fad2c33f 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -23,7 +23,7 @@ use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{Backend, ConstMethods, TypeMethods}; +use interfaces::{Backend, ConstMethods, BaseTypeMethods}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; @@ -51,7 +51,7 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo ty.is_freeze(tcx, 
ty::ParamEnv::reveal_all(), DUMMY_SP) } -pub struct OperandBundleDef<'a, Value : 'a> { +pub struct OperandBundleDef<'a, Value> { pub name: &'a str, pub val: Value } diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index d806ddd1fcddd..5b5e81da1786c 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -24,7 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::TypeMethods; +use interfaces::{BaseTypeMethods, DerivedTypeMethods}; use rustc::ty::layout::{Align, LayoutOf}; diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 1b7b6953c6410..ef8810d97a621 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -23,7 +23,8 @@ use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; -use interfaces::TypeMethods; +use interfaces::{BaseTypeMethods, DerivedTypeMethods, + IntrinsicMethods, BaseIntrinsicMethods, DerivedIntrinsicMethods}; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -321,16 +322,328 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { pub fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } +} + +impl BaseIntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> {} - pub fn get_intrinsic(&self, key: &str) -> &'b Value { +impl DerivedIntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> { + fn get_intrinsic(&self, key: &str) -> &'b Value { if let Some(v) = self.intrinsics.borrow().get(key).cloned() { return v; } declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } + + /// Declare any llvm intrinsics that you might need + fn declare_intrinsic( + &self, + key: &str + ) -> Option<&'b Value> { + macro_rules! 
ifn { + ($name:expr, fn() -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(&self, $name, &self.type_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + &self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn(...) -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(&self, $name, &self.type_variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + &self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(&self, $name, &self.type_func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); + &self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! mk_struct { + ($($field_ty:expr),*) => (&self.type_struct( &[$($field_ty),*], false)) + } + + let i8p = &self.type_i8p(); + let void = &self.type_void(); + let i1 = &self.type_i1(); + let t_i8 = &self.type_i8(); + let t_i16 = &self.type_i16(); + let t_i32 = &self.type_i32(); + let t_i64 = &self.type_i64(); + let t_i128 = &self.type_i128(); + let t_f32 = &self.type_f32(); + let t_f64 = &self.type_f64(); + + let t_v2f32 = &self.type_vector(t_f32, 2); + let t_v4f32 = &self.type_vector(t_f32, 4); + let t_v8f32 = &self.type_vector(t_f32, 8); + let t_v16f32 = &self.type_vector(t_f32, 16); + + let t_v2f64 = &self.type_vector(t_f64, 2); + let t_v4f64 = &self.type_vector(t_f64, 4); + let t_v8f64 = &self.type_vector(t_f64, 8); + + ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) 
-> void); + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); + ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); + ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); + ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); + ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); + ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); + + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); + 
ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + 
ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) 
-> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + 
ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); + ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", 
fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) 
-> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + + if self.sess().opts.debuginfo != DebugInfo::None { + ifn!("llvm.dbg.declare", fn(&self.type_metadata(), &self.type_metadata()) -> void); + ifn!("llvm.dbg.value", fn(&self.type_metadata(), t_i64, &self.type_metadata()) -> void); + } + return None; + } } +impl IntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> {} + impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { /// Generate a new symbol name with the given prefix. This symbol name must /// only be used for definitions with `internal` or `private` linkage. @@ -480,310 +793,3 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx, &'ll Value> { }) } } - -/// Declare any llvm intrinsics that you might need -fn declare_intrinsic( - cx: &CodegenCx<'ll, '_, &'ll Value>, - key: &str -) -> Option<&'ll Value> { - macro_rules! ifn { - ($name:expr, fn() -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, cx.type_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn(...) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, cx.type_variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, cx.type_func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - } - macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (cx.type_struct( &[$($field_ty),*], false)) - } - - let i8p = cx.type_i8p(); - let void = cx.type_void(); - let i1 = cx.type_i1(); - let t_i8 = cx.type_i8(); - let t_i16 = cx.type_i16(); - let t_i32 = cx.type_i32(); - let t_i64 = cx.type_i64(); - let t_i128 = cx.type_i128(); - let t_f32 = cx.type_f32(); - let t_f64 = cx.type_f64(); - - let t_v2f32 = cx.type_vector(t_f32, 2); - let t_v4f32 = cx.type_vector(t_f32, 4); - let t_v8f32 = cx.type_vector(t_f32, 8); - let t_v16f32 = cx.type_vector(t_f32, 16); - - let t_v2f64 = cx.type_vector(t_f64, 2); - let t_v4f64 = cx.type_vector(t_f64, 4); - let t_v8f64 = cx.type_vector(t_f64, 8); - - ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); - - ifn!("llvm.trap", fn() -> void); - ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); - - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); - ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); - ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); - ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); - ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); - ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); - 
- ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); - ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); - ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp.v4f32", fn(t_v4f32) -> 
t_v4f32); - ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log2.v2f64", fn(t_v2f64) 
-> t_v2f64); - ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); - ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); - ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); - ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); - ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); - ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); - ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); - ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); - ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); - 
ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); - ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.round.f32", fn(t_f32) -> t_f32); - ifn!("llvm.round.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); - ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); - ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); - ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); - ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); - ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); - ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> 
mk_struct!{t_i32, i1}); - ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, 
i1}); - ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); - ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); - - ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); - ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); - - ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); - - if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(cx.type_metadata(), cx.type_metadata()) -> void); - ifn!("llvm.dbg.value", fn(cx.type_metadata(), t_i64, cx.type_metadata()) -> void); - } - - None -} diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 41a4962fcc310..9794fabbdc054 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -17,7 +17,7 @@ use builder::Builder; use declare; use rustc::session::config::DebugInfo; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods}; use syntax::attr; diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 7d9babf2e3767..afa22dac40e4e 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -17,6 +17,7 @@ use builder::MemFlags; use super::backend::Backend; use super::type_::TypeMethods; use super::consts::ConstMethods; +use super::intrinsic::IntrinsicMethods; use std::borrow::Cow; use std::ops::Range; @@ -26,7 +27,7 @@ use syntax::ast::AsmDialect; pub trait 
BuilderMethods<'a, 'll :'a, 'tcx: 'll> { - type CodegenCx : 'a + Backend + TypeMethods + ConstMethods; + type CodegenCx : 'a + Backend + TypeMethods + ConstMethods + IntrinsicMethods; fn new_block<'b>( cx: &'a Self::CodegenCx, diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs new file mode 100644 index 0000000000000..cc50f815ffca6 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -0,0 +1,25 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::backend::Backend; + +pub trait BaseIntrinsicMethods : Backend { + +} + +pub trait DerivedIntrinsicMethods : Backend { + fn get_intrinsic(&self, key: &str) -> Self::Value; + fn declare_intrinsic( + &self, + key: &str + ) -> Option; +} + +pub trait IntrinsicMethods : BaseIntrinsicMethods + DerivedIntrinsicMethods {} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 93c46aed4acbe..def4b49f27d65 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -12,8 +12,10 @@ mod builder; mod backend; mod consts; mod type_; +mod intrinsic; pub use self::builder::BuilderMethods; pub use self::backend::Backend; pub use self::consts::ConstMethods; -pub use self::type_::TypeMethods; +pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods}; +pub use self::intrinsic::{IntrinsicMethods, BaseIntrinsicMethods, DerivedIntrinsicMethods}; diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index b24d00cfe5e01..894bf22f445b7 100644 --- 
a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -10,8 +10,10 @@ use super::backend::Backend; use common::TypeKind; +use syntax::ast; +use rustc::ty::layout::{self, Align, Size}; -pub trait TypeMethods : Backend { +pub trait BaseTypeMethods : Backend { fn type_void(&self) -> Self::Type; fn type_metadata(&self) -> Self::Type; fn type_i1(&self) -> Self::Type; @@ -42,3 +44,32 @@ pub trait TypeMethods : Backend { fn val_ty(&self, v: Self::Value) -> Self::Type; } + +pub trait DerivedTypeMethods : Backend { + fn type_bool(&self) -> Self::Type; + fn type_char(&self) -> Self::Type; + fn type_i8p(&self) -> Self::Type; + fn type_isize(&self) -> Self::Type; + fn type_int(&self) -> Self::Type; + fn type_int_from_ty( + &self, + t: ast::IntTy + ) -> Self::Type; + fn type_uint_from_ty( + &self, + t: ast::UintTy + ) -> Self::Type; + fn type_float_from_ty( + &self, + t: ast::FloatTy + ) -> Self::Type; + fn type_from_integer(&self, i: layout::Integer) -> Self::Type; + fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type; + fn type_padding_filler( + &self, + size: Size, + align: Align + ) -> Self::Type; +} + +pub trait TypeMethods : BaseTypeMethods + DerivedTypeMethods {} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index a1bdb015e59b5..6c281a8d85ca5 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -31,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; use rustc::session::Session; use syntax_pos::Span; diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 83100447a5a4c..161d205b7dd28 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -16,7 +16,7 @@ use 
consts; use monomorphize; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 3a008494f8ead..cb6eb54ed7e66 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -26,7 +26,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 2d775d3124158..15aec86c784e8 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -26,7 +26,7 @@ use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; use super::super::callee; use super::FunctionCx; diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index ce6d3f9e0daa1..e4bd8f7450ba5 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -23,7 +23,7 @@ use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebug use monomorphize::Instance; use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; use value::Value; -use interfaces::{BuilderMethods, ConstMethods}; +use interfaces::{BuilderMethods, ConstMethods, DerivedTypeMethods}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; diff --git 
a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 78d2ba5a5f01c..64b5219530a32 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -21,7 +21,7 @@ use value::Value; use type_of::LayoutLlvmExt; use glue; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedIntrinsicMethods}; use std::fmt; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 6be273d8866ed..04ea0aba039b3 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -22,7 +22,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index be4d645080302..dd9694b24ef91 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -26,7 +26,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, TypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedIntrinsicMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index ed287d7185335..7c05f631f3c4b 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,7 +16,7 @@ use llvm; use llvm::{Bool, False, True}; use context::CodegenCx; use value::Value; -use interfaces::TypeMethods; +use interfaces::{BaseTypeMethods, DerivedTypeMethods, TypeMethods}; use 
syntax::ast; @@ -42,7 +42,7 @@ impl fmt::Debug for Type { } } -impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { +impl BaseTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_void(&self) -> &'ll Type { unsafe { @@ -263,25 +263,25 @@ impl Type { } } -impl CodegenCx<'ll, 'tcx, &'ll Value> { +impl DerivedTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { - pub fn type_bool(&self) -> &'ll Type { + fn type_bool(&self) -> &'ll Type { &self.type_i8() } - pub fn type_char(&self) -> &'ll Type { + fn type_char(&self) -> &'ll Type { &self.type_i32() } - pub fn type_i8p(&self) -> &'ll Type { + fn type_i8p(&self) -> &'ll Type { &self.type_ptr_to(&self.type_i8()) } - pub fn type_isize(&self) -> &'ll Type { + fn type_isize(&self) -> &'ll Type { &self.isize_ty } - pub fn type_int(&self) -> &'ll Type { + fn type_int(&self) -> &'ll Type { match &self.sess().target.target.target_c_int_width[..] { "16" => &self.type_i16(), "32" => &self.type_i32(), @@ -290,7 +290,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { } } - pub fn type_int_from_ty( + fn type_int_from_ty( &self, t: ast::IntTy ) -> &'ll Type { @@ -304,7 +304,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { } } - pub fn type_uint_from_ty( + fn type_uint_from_ty( &self, t: ast::UintTy ) -> &'ll Type { @@ -318,7 +318,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { } } - pub fn type_float_from_ty( + fn type_float_from_ty( &self, t: ast::FloatTy ) -> &'ll Type { @@ -328,7 +328,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { } } - pub fn type_from_integer(&self, i: layout::Integer) -> &'ll Type { + fn type_from_integer(&self, i: layout::Integer) -> &'ll Type { use rustc::ty::layout::Integer::*; match i { I8 => &self.type_i8(), @@ -341,7 +341,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { /// Return a LLVM type that has at most the required alignment, /// as a conservative approximation for unknown pointee types. 
- pub fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type { + fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = layout::Integer::approximate_abi_align(self, align); &self.type_from_integer(ity) @@ -349,7 +349,7 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. - pub fn type_padding_filler( + fn type_padding_filler( &self, size: Size, align: Align @@ -361,3 +361,5 @@ impl CodegenCx<'ll, 'tcx, &'ll Value> { &self.type_array(&self.type_from_integer(unit), size / unit_size) } } + +impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index a5aaede1fc84c..b2cd592f24b57 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -17,7 +17,7 @@ use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; use value::Value; -use interfaces::TypeMethods; +use interfaces::{BaseTypeMethods, DerivedTypeMethods}; use std::fmt::Write; From 20a7eef82c07310134ab9a7219faeeaffa9ca3fb Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 10 Sep 2018 16:28:47 +0200 Subject: [PATCH 41/76] Added StaticMethods trait --- src/librustc_codegen_llvm/consts.rs | 624 +++++++++--------- src/librustc_codegen_llvm/interfaces/mod.rs | 2 + .../interfaces/statics.rs | 36 + src/librustc_codegen_llvm/intrinsic.rs | 4 +- src/librustc_codegen_llvm/meth.rs | 5 +- src/librustc_codegen_llvm/mir/block.rs | 21 +- src/librustc_codegen_llvm/mir/constant.rs | 15 +- src/librustc_codegen_llvm/mir/place.rs | 11 +- src/librustc_codegen_llvm/mir/rvalue.rs | 5 +- src/librustc_codegen_llvm/mono_item.rs | 4 +- 10 files changed, 385 insertions(+), 342 deletions(-) create mode 100644 
src/librustc_codegen_llvm/interfaces/statics.rs diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 5b5e81da1786c..6fbb21e8412bc 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -24,7 +24,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::{BaseTypeMethods, DerivedTypeMethods}; +use interfaces::{BaseTypeMethods, DerivedTypeMethods, StaticMethods}; use rustc::ty::layout::{Align, LayoutOf}; @@ -32,17 +32,6 @@ use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; use std::ffi::{CStr, CString}; -pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstPointerCast(val, ty) - } -} - -pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstBitCast(val, ty) - } -} fn set_global_alignment(cx: &CodegenCx<'ll, '_, &'ll Value>, gv: &'ll Value, @@ -63,178 +52,6 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_, &'ll Value>, } } -pub fn addr_of_mut( - cx: &CodegenCx<'ll, '_, &'ll Value>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - unsafe { - let gv = match kind { - Some(kind) if !cx.tcx.sess.fewer_names() => { - let name = cx.generate_local_symbol_name(kind); - let gv = declare::define_global(cx, &name[..], - cx.val_ty(cv)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", name); - }); - llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); - gv - }, - _ => declare::define_private_global(cx, cx.val_ty(cv)), - }; - llvm::LLVMSetInitializer(gv, cv); - set_global_alignment(cx, gv, align); - SetUnnamedAddr(gv, true); - gv - } -} - -pub fn addr_of( - cx: &CodegenCx<'ll, '_, &'ll Value>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - if let Some(&gv) = cx.const_globals.borrow().get(&cv) { - unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // 
alignment requirements - let llalign = align.abi() as u32; - if llalign > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, llalign); - } - } - return gv; - } - let gv = addr_of_mut(cx, cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } - cx.const_globals.borrow_mut().insert(cv, gv); - gv -} - -pub fn get_static(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll Value { - let instance = Instance::mono(cx.tcx, def_id); - if let Some(&g) = cx.instances.borrow().get(&instance) { - return g; - } - - let defined_in_current_codegen_unit = cx.codegen_unit - .items() - .contains_key(&MonoItem::Static(def_id)); - assert!(!defined_in_current_codegen_unit, - "consts::get_static() should always hit the cache for \ - statics defined in the same CGU, but did not for `{:?}`", - def_id); - - let ty = instance.ty(cx.tcx); - let sym = cx.tcx.symbol_name(instance).as_str(); - - debug!("get_static: sym={} instance={:?}", sym, instance); - - let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) { - - let llty = cx.layout_of(ty).llvm_type(cx); - let (g, attrs) = match cx.tcx.hir.get(id) { - Node::Item(&hir::Item { - ref attrs, span, node: hir::ItemKind::Static(..), .. - }) => { - if declare::get_declared_value(cx, &sym[..]).is_some() { - span_bug!(span, "Conflicting symbol names for static?"); - } - - let g = declare::define_global(cx, &sym[..], llty).unwrap(); - - if !cx.tcx.is_reachable_non_generic(def_id) { - unsafe { - llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); - } - } - - (g, attrs) - } - - Node::ForeignItem(&hir::ForeignItem { - ref attrs, span, node: hir::ForeignItemKind::Static(..), .. 
- }) => { - let fn_attrs = cx.tcx.codegen_fn_attrs(def_id); - (check_and_apply_linkage(cx, &fn_attrs, ty, sym, Some(span)), attrs) - } - - item => bug!("get_static: expected static, found {:?}", item) - }; - - debug!("get_static: sym={} attrs={:?}", sym, attrs); - - for attr in attrs { - if attr.check_name("thread_local") { - llvm::set_thread_local_mode(g, cx.tls_model); - } - } - - g - } else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? - debug!("get_static: sym={} item_attr={:?}", sym, cx.tcx.item_attrs(def_id)); - - let attrs = cx.tcx.codegen_fn_attrs(def_id); - let g = check_and_apply_linkage(cx, &attrs, ty, sym, None); - - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - } - - let needs_dll_storage_attr = - cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!(!(cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - cx.tcx.sess.target.target.options.is_like_msvc && - cx.tcx.sess.opts.cg.prefer_dynamic)); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e. it originates from an external Rust - // crate. 
Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to - // make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. - if !cx.tcx.is_codegened_item(def_id) { - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - } - g - }; - - if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) { - // For foreign (native) libs we know the exact storage type to use. - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - - cx.instances.borrow_mut().insert(instance, g); - g -} - fn check_and_apply_linkage( cx: &CodegenCx<'ll, 'tcx, &'ll Value>, attrs: &CodegenFnAttrs, @@ -294,146 +111,337 @@ fn check_and_apply_linkage( } } -pub fn codegen_static<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx, &'a Value>, - def_id: DefId, - is_mutable: bool, -) { +pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { - let attrs = cx.tcx.codegen_fn_attrs(def_id); + llvm::LLVMConstPointerCast(val, ty) + } +} - let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) { - Ok(v) => v, - // Error has already been reported - Err(_) => return, - }; +impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - let g = get_static(cx, def_id); + fn static_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + ptrcast(val, ty) + } - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = cx.val_ty(v); - let v = if val_llty == cx.type_i1() { - val_llty = cx.type_i8(); - llvm::LLVMConstZExt(v, 
val_llty) - } else { - v - }; + fn static_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstBitCast(val, ty) + } + } + + fn static_addr_of_mut( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + unsafe { + let gv = match kind { + Some(kind) if !&self.tcx.sess.fewer_names() => { + let name = &self.generate_local_symbol_name(kind); + let gv = declare::define_global(&self, &name[..], + &self.val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + gv + }, + _ => declare::define_private_global(&self, &self.val_ty(cv)), + }; + llvm::LLVMSetInitializer(gv, cv); + set_global_alignment(&self, gv, align); + SetUnnamedAddr(gv, true); + gv + } + } + + fn static_addr_of( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + if let Some(&gv) = &self.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + } + return gv; + } + let gv = &self.static_addr_of_mut(cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + &self.const_globals.borrow_mut().insert(cv, gv); + gv + } + + fn get_static(&self, def_id: DefId) -> &'ll Value { + let instance = Instance::mono(self.tcx, def_id); + if let Some(&g) = &self.instances.borrow().get(&instance) { + return g; + } + + let defined_in_current_codegen_unit = &self.codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); + assert!(!defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id); + + let ty = instance.ty(self.tcx); + let sym = self.tcx.symbol_name(instance).as_str(); + + 
debug!("get_static: sym={} instance={:?}", sym, instance); + + let g = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { + + let llty = &self.layout_of(ty).llvm_type(&self); + let (g, attrs) = match &self.tcx.hir.get(id) { + Node::Item(&hir::Item { + ref attrs, span, node: hir::ItemKind::Static(..), .. + }) => { + if declare::get_declared_value(&self, &sym[..]).is_some() { + span_bug!(span, "Conflicting symbol names for static?"); + } + + let g = declare::define_global(&self, &sym[..], llty).unwrap(); + + if !&self.tcx.is_reachable_non_generic(def_id) { + unsafe { + llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); + } + } + + (g, attrs) + } + + Node::ForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemKind::Static(..), .. + }) => { + let fn_attrs = &self.tcx.codegen_fn_attrs(def_id); + (check_and_apply_linkage(&self, &fn_attrs, ty, sym, Some(span)), attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + debug!("get_static: sym={} attrs={:?}", sym, attrs); + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local_mode(g, self.tls_model); + } + } - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - let g = if val_llty == llty { g } else { - // If we created the global with the wrong type, - // correct the type. 
- let empty_string = const_cstr!(""); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); - let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); - - let linkage = llvm::LLVMRustGetLinkage(g); - let visibility = llvm::LLVMRustGetVisibility(g); - - let new_g = llvm::LLVMRustGetOrInsertGlobal( - cx.llmod, name_string.as_ptr(), val_llty); - - llvm::LLVMRustSetLinkage(new_g, linkage); - llvm::LLVMRustSetVisibility(new_g, visibility); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - cx.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + debug!("get_static: sym={} item_attr={:?}", sym, &self.tcx.item_attrs(def_id)); + + let attrs = &self.tcx.codegen_fn_attrs(def_id); + let g = check_and_apply_linkage(&self, &attrs, ty, sym, None); + + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } + + let needs_dll_storage_attr = + self.use_dll_storage_attrs && !&self.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. 
+ !&self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!(!(self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + self.tcx.sess.target.target.options.is_like_msvc && + self.tcx.sess.opts.cg.prefer_dynamic)); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e. it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to + // make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !&self.tcx.is_codegened_item(def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + } + g }; - set_global_alignment(cx, g, cx.align_of(ty)); - llvm::LLVMSetInitializer(g, v); - - // As an optimization, all shared statics which do not have interior - // mutability are placed into read-only memory. - if !is_mutable { - if cx.type_is_freeze(ty) { - llvm::LLVMSetGlobalConstant(g, llvm::True); + + if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + // For foreign (native) libs we know the exact storage type to use. + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); } } - debuginfo::create_global_var_metadata(cx, def_id, g); - - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - - // Do not allow LLVM to change the alignment of a TLS on macOS. - // - // By default a global's alignment can be freely increased. 
- // This allows LLVM to generate more performant instructions - // e.g. using load-aligned into a SIMD register. - // - // However, on macOS 10.10 or below, the dynamic linker does not - // respect any alignment given on the TLS (radar 24221680). - // This will violate the alignment assumption, and causing segfault at runtime. - // - // This bug is very easy to trigger. In `println!` and `panic!`, - // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, - // which the values would be `mem::replace`d on initialization. - // The implementation of `mem::replace` will use SIMD - // whenever the size is 32 bytes or higher. LLVM notices SIMD is used - // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, - // which macOS's dyld disregarded and causing crashes - // (see issues #51794, #51758, #50867, #48866 and #44056). - // - // To workaround the bug, we trick LLVM into not increasing - // the global's alignment by explicitly assigning a section to it - // (equivalent to automatically generating a `#[link_section]` attribute). - // See the comment in the `GlobalValue::canIncreaseAlignment()` function - // of `lib/IR/Globals.cpp` for why this works. - // - // When the alignment is not increased, the optimized `mem::replace` - // will use load-unaligned instructions instead, and thus avoiding the crash. - // - // We could remove this hack whenever we decide to drop macOS 10.10 support. 
- if cx.tcx.sess.target.target.options.is_like_osx { - let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") - } else { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") - }; - llvm::LLVMSetSection(g, sect_name.as_ptr()); + &self.instances.borrow_mut().insert(instance, g); + g + } + + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ) { + unsafe { + let attrs = &self.tcx.codegen_fn_attrs(def_id); + + let (v, alloc) = match ::mir::codegen_static_initializer(&self, def_id) { + Ok(v) => v, + // Error has already been reported + Err(_) => return, + }; + + let g = &self.get_static(def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = self.val_ty(v); + let v = if val_llty == self.type_i1() { + val_llty = self.type_i8(); + llvm::LLVMConstZExt(v, val_llty) + } else { + v + }; + + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(&self); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. + let empty_string = const_cstr!(""); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + + let linkage = llvm::LLVMRustGetLinkage(g); + let visibility = llvm::LLVMRustGetVisibility(g); + + let new_g = llvm::LLVMRustGetOrInsertGlobal( + &self.llmod, name_string.as_ptr(), val_llty); + + llvm::LLVMRustSetLinkage(new_g, linkage); + llvm::LLVMRustSetVisibility(new_g, visibility); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) 
+ &self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(&self, g, self.align_of(ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + if !is_mutable { + if self.type_is_freeze(ty) { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } } - } + debuginfo::create_global_var_metadata(&self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + + // Do not allow LLVM to change the alignment of a TLS on macOS. + // + // By default a global's alignment can be freely increased. + // This allows LLVM to generate more performant instructions + // e.g. using load-aligned into a SIMD register. + // + // However, on macOS 10.10 or below, the dynamic linker does not + // respect any alignment given on the TLS (radar 24221680). + // This will violate the alignment assumption, and causing segfault at runtime. + // + // This bug is very easy to trigger. In `println!` and `panic!`, + // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, + // which the values would be `mem::replace`d on initialization. + // The implementation of `mem::replace` will use SIMD + // whenever the size is 32 bytes or higher. LLVM notices SIMD is used + // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, + // which macOS's dyld disregarded and causing crashes + // (see issues #51794, #51758, #50867, #48866 and #44056). + // + // To workaround the bug, we trick LLVM into not increasing + // the global's alignment by explicitly assigning a section to it + // (equivalent to automatically generating a `#[link_section]` attribute). + // See the comment in the `GlobalValue::canIncreaseAlignment()` function + // of `lib/IR/Globals.cpp` for why this works. 
+ // + // When the alignment is not increased, the optimized `mem::replace` + // will use load-unaligned instructions instead, and thus avoiding the crash. + // + // We could remove this hack whenever we decide to drop macOS 10.10 support. + if self.tcx.sess.target.target.options.is_like_osx { + let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + }; + llvm::LLVMSetSection(g, sect_name.as_ptr()); + } + } - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. - if cx.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext( - cx.llcx, - section.as_str().as_ptr() as *const _, - section.as_str().len() as c_uint, - ); - let alloc = llvm::LLVMMDStringInContext( - cx.llcx, - alloc.bytes.as_ptr() as *const _, - alloc.bytes.len() as c_uint, - ); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext(cx.llcx, data.as_ptr(), 2); - llvm::LLVMAddNamedMetadataOperand( - cx.llmod, - "wasm.custom_sections\0".as_ptr() as *const _, - meta, - ); + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. 
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(section) = attrs.link_section { + let section = llvm::LLVMMDStringInContext( + &self.llcx, + section.as_str().as_ptr() as *const _, + section.as_str().len() as c_uint, + ); + let alloc = llvm::LLVMMDStringInContext( + &self.llcx, + alloc.bytes.as_ptr() as *const _, + alloc.bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(&self.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + &self.llmod, + "wasm.custom_sections\0".as_ptr() as *const _, + meta, + ); + } + } else { + base::set_link_section(g, &attrs); } - } else { - base::set_link_section(g, &attrs); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED) { - // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, cx.type_i8p()); - cx.used_statics.borrow_mut().push(cast); + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + // This static will be stored in the llvm.used variable which is an array of i8* + let cast = llvm::LLVMConstPointerCast(g, &self.type_i8p()); + &self.used_statics.borrow_mut().push(cast); + } } } } diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index def4b49f27d65..24cae1e941e5c 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -13,9 +13,11 @@ mod backend; mod consts; mod type_; mod intrinsic; +mod statics; pub use self::builder::BuilderMethods; pub use self::backend::Backend; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods}; pub use self::intrinsic::{IntrinsicMethods, BaseIntrinsicMethods, DerivedIntrinsicMethods}; +pub use self::statics::StaticMethods; diff --git a/src/librustc_codegen_llvm/interfaces/statics.rs b/src/librustc_codegen_llvm/interfaces/statics.rs new file mode 100644 index 
0000000000000..a20b814749172 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/statics.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::layout::Align; +use rustc::hir::def_id::DefId; +use super::backend::Backend; + +pub trait StaticMethods<'tcx> : Backend { + fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_addr_of_mut( + &self, + cv: Self::Value, + align: Align, + kind: Option<&str>, + ) -> Self::Value; + fn static_addr_of( + &self, + cv: Self::Value, + align: Align, + kind: Option<&str>, + ) -> Self::Value; + fn get_static(&self, def_id: DefId) -> Self::Value; + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ); +} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 6c281a8d85ca5..86651bf269fdb 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -31,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; use rustc::session::Session; use syntax_pos::Span; @@ -845,7 +845,7 @@ fn codegen_msvc_try( let tcx = cx.tcx; let tydesc = match tcx.lang_items().msvc_try_filter() { - Some(did) => ::consts::get_static(cx, did), + Some(did) => cx.get_static(did), None => bug!("msvc_try_filter not defined"), }; let tok = catchpad.catch_pad(cs, &[tydesc, 
cx.const_i32(0), slot]); diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 161d205b7dd28..a4bdc19bcf11a 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -12,11 +12,10 @@ use abi::{FnType, FnTypeExt}; use callee; use context::CodegenCx; use builder::Builder; -use consts; use monomorphize; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods}; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; @@ -120,7 +119,7 @@ pub fn get_vtable( let vtable_const = cx.const_struct(&components, false); let align = cx.data_layout().pointer_align; - let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); + let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); debuginfo::create_vtable_metadata(cx, ty, vtable); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index cb6eb54ed7e66..4ca5d55462f98 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -19,14 +19,13 @@ use base; use callee; use builder::{Builder, MemFlags}; use common::{self, IntPredicate}; -use consts; use meth; use monomorphize; use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -379,10 +378,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let index = self.codegen_operand(&mut bx, index).immediate(); let file_line_col = bx.cx().const_struct(&[filename, line, col], false); - let file_line_col = consts::addr_of(bx.cx(), - file_line_col, - align, - 
Some("panic_bounds_check_loc")); + let file_line_col = bx.cx().static_addr_of( + file_line_col, + align, + Some("panic_bounds_check_loc") + ); (lang_items::PanicBoundsCheckFnLangItem, vec![file_line_col, index, len]) } @@ -394,10 +394,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { &[msg_str, filename, line, col], false ); - let msg_file_line_col = consts::addr_of(bx.cx(), - msg_file_line_col, - align, - Some("panic_loc")); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); (lang_items::PanicFnLangItem, vec![msg_file_line_col]) } diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 15aec86c784e8..cb0e1de94e3a8 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -20,13 +20,12 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; use builder::Builder; use common::{CodegenCx}; -use consts; use type_of::LayoutLlvmExt; use type_::Type; use syntax::ast::Mutability; use syntax::source_map::Span; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods}; use super::super::callee; use super::FunctionCx; @@ -49,7 +48,7 @@ pub fn scalar_to_llvm( if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { - consts::bitcast(llval, llty) + cx.static_bitcast(llval, llty) } }, Scalar::Ptr(ptr) => { @@ -58,9 +57,9 @@ pub fn scalar_to_llvm( Some(AllocType::Memory(alloc)) => { let init = const_alloc_to_llvm(cx, alloc); if alloc.mutability == Mutability::Mutable { - consts::addr_of_mut(cx, init, alloc.align, None) + cx.static_addr_of_mut(init, alloc.align, None) } else { - consts::addr_of(cx, init, alloc.align, None) + cx.static_addr_of(init, alloc.align, None) } } Some(AllocType::Function(fn_instance)) => { @@ 
-68,19 +67,19 @@ pub fn scalar_to_llvm( } Some(AllocType::Static(def_id)) => { assert!(cx.tcx.is_static(def_id).is_some()); - consts::get_static(cx, def_id) + cx.get_static(def_id) } None => bug!("missing allocation {:?}", ptr.alloc_id), }; let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, cx.type_i8p()), + cx.static_bitcast(base_addr, cx.type_i8p()), &cx.const_usize(ptr.offset.bytes()), 1, ) }; if layout.value != layout::Pointer { unsafe { llvm::LLVMConstPtrToInt(llval, llty) } } else { - consts::bitcast(llval, llty) + cx.static_bitcast(llval, llty) } } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 04ea0aba039b3..28a60ac50c721 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -16,13 +16,12 @@ use rustc::mir::tcx::PlaceTy; use base; use builder::Builder; use common::{CodegenCx, IntPredicate}; -use consts; use type_of::LayoutLlvmExt; use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -64,14 +63,14 @@ impl PlaceRef<'tcx, &'ll Value> { offset: Size, ) -> PlaceRef<'tcx, &'ll Value> { let init = const_alloc_to_llvm(bx.cx(), alloc); - let base_addr = consts::addr_of(bx.cx(), init, layout.align, None); + let base_addr = bx.cx().static_addr_of(init, layout.align, None); let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, bx.cx().type_i8p()), + bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()), &bx.cx().const_usize(offset.bytes()), 1, )}; - let llval = consts::bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))); + let llval = bx.cx().static_bitcast(llval, 
bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))); PlaceRef::new_sized(llval, layout, alloc.align) } @@ -492,7 +491,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } mir::Place::Static(box mir::Static { def_id, ty }) => { let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align) + PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align) }, mir::Place::Projection(box mir::Projection { ref base, diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index dd9694b24ef91..a10a533fb836a 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -20,7 +20,6 @@ use base; use builder::Builder; use callee; use common::{self, IntPredicate, RealPredicate}; -use consts; use monomorphize; use type_::Type; use type_of::LayoutLlvmExt; @@ -841,7 +840,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(IntPredicate::IntUGE, x, max); let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); - let infinity = consts::bitcast(infinity_bits, float_ty); + let infinity = bx.bitcast(infinity_bits, float_ty); bx.select(overflow, infinity, bx.uitofp(x, float_ty)) } else { if signed { @@ -922,7 +921,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; - consts::bitcast(bits_llval, float_ty) + bx.bitcast(bits_llval, float_ty) }; let (f_min, f_max) = match bx.cx().float_width(float_ty) { 32 => compute_clamp_bounds_single(signed, int_ty), diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index fa83b22f3bd7f..169bb6df73d30 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -17,7 +17,6 @@ use asm; use attributes; use base; -use 
consts; use context::CodegenCx; use declare; use llvm; @@ -31,6 +30,7 @@ use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; use std::fmt; use value::Value; +use interfaces::StaticMethods; pub use rustc::mir::mono::MonoItem; @@ -55,7 +55,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { bug!("Expected Def::Static for {:?}, found nothing", def_id) } }; - consts::codegen_static(&cx, def_id, is_mutable); + cx.codegen_static(def_id, is_mutable); } MonoItem::GlobalAsm(node_id) => { let item = cx.tcx.hir.expect_item(node_id); From 037bc7a3fbe76ad894232891c22a421a09db3e4e Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 10 Sep 2018 17:59:20 +0200 Subject: [PATCH 42/76] Transfered memcpy and memset to BuilderMethods --- src/librustc_codegen_llvm/abi.rs | 14 +++--- src/librustc_codegen_llvm/base.rs | 45 +------------------ src/librustc_codegen_llvm/builder.rs | 42 +++++++++++++++++ .../interfaces/builder.rs | 19 ++++++++ src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 4 +- 8 files changed, 74 insertions(+), 56 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 0125ab3a257a9..0d87cbacebde1 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -9,7 +9,6 @@ // except according to those terms. use llvm::{self, AttributePlace}; -use base; use builder::{Builder, MemFlags}; use common::ty_fn_sig; use context::CodegenCx; @@ -239,12 +238,13 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { bx.store(val, llscratch, scratch_align); // ...and then memcpy it to the intended destination. 
- base::call_memcpy(bx, - bx.pointercast(dst.llval, cx.type_i8p()), - bx.pointercast(llscratch, cx.type_i8p()), - cx.const_usize(self.layout.size.bytes()), - self.layout.align.min(scratch_align), - MemFlags::empty()); + bx.call_memcpy( + bx.pointercast(dst.llval, cx.type_i8p()), + bx.pointercast(llscratch, cx.type_i8p()), + cx.const_usize(self.layout.size.bytes()), + self.layout.align.min(scratch_align), + MemFlags::empty() + ); bx.lifetime_end(llscratch, scratch_size); } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 207011a5d6ac2..36964ddc2009a 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -79,7 +79,6 @@ use std::any::Any; use std::ffi::CString; use std::sync::Arc; use std::time::{Instant, Duration}; -use std::i32; use std::cmp; use std::sync::mpsc; use syntax_pos::Span; @@ -432,33 +431,6 @@ pub fn to_immediate_scalar<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx val } -pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - dst: ::Value, - src: ::Value, - n_bytes: ::Value, - align: Align, - flags: MemFlags, -) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
- let val = bx.load(src, align); - let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val))); - bx.store_with_flags(val, ptr, align, flags); - return; - } - let cx = bx.cx(); - let ptr_width = &bx.sess().target.target.target_pointer_width; - let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); - let memcpy = cx.get_intrinsic(&key); - let src_ptr = bx.pointercast(src, cx.type_i8p()); - let dst_ptr = bx.pointercast(dst, cx.type_i8p()); - let size = bx.intcast(n_bytes, cx.type_isize(), false); - let align = cx.const_i32(align.abi() as i32); - let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); - bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); -} - pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, dst: ::Value, @@ -472,22 +444,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx> return; } - call_memcpy(bx, dst, src, bx.cx().const_usize(size), align, flags); -} - -pub fn call_memset<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - ptr: ::Value, - fill_byte: ::Value, - size: ::Value, - align: ::Value, - volatile: bool, -) -> ::Value { - let ptr_width = &bx.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key); - let volatile = bx.cx().const_bool(volatile); - bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) + bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); } pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>) { diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 51da73648746b..fdb4bf03e1aaf 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1293,6 +1293,48 @@ impl BuilderMethods<'a, 'll, 'tcx> } } + fn call_memcpy( + 
&self, + dst: &'ll Value, + src: &'ll Value, + n_bytes: &'ll Value, + align: Align, + flags: MemFlags, + ) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = &self.load(src, align); + let ptr = &self.pointercast(dst, &self.cx().type_ptr_to(&self.cx().val_ty(val))); + &self.store_with_flags(val, ptr, align, flags); + return; + } + let cx = &self.cx(); + let ptr_width = &self.sess().target.target.target_pointer_width; + let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); + let memcpy = cx.get_intrinsic(&key); + let src_ptr = &self.pointercast(src, cx.type_i8p()); + let dst_ptr = &self.pointercast(dst, cx.type_i8p()); + let size = &self.intcast(n_bytes, cx.type_isize(), false); + let align = cx.const_i32(align.abi() as i32); + let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); + &self.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); + } + + fn call_memset( + &self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: &'ll Value, + volatile: bool, + ) -> &'ll Value { + let ptr_width = &self.sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = &self.cx().get_intrinsic(&intrinsic_key); + let volatile = &self.cx().const_bool(volatile); + &self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) + } + fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("zext"); unsafe { diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index afa22dac40e4e..9a68596b54cae 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -583,6 +583,25 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { args: &[::Value], bundle: Option<&OperandBundleDef<'ll, ::Value>> ) -> ::Value; + + fn call_memcpy( + &self, + dst: 
::Value, + src: ::Value, + n_bytes: ::Value, + align: Align, + flags: MemFlags, + ); + + fn call_memset( + &self, + ptr: ::Value, + fill_byte: ::Value, + size: ::Value, + align: ::Value, + volatile: bool, + ) -> ::Value; + fn zext( &self, val: ::Value, diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 86651bf269fdb..02a0be4d2fe94 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -743,7 +743,7 @@ fn memset_intrinsic( let size = cx.const_usize(size.bytes()); let align = cx.const_i32(align.abi() as i32); let dst = bx.pointercast(dst, cx.type_i8p()); - call_memset(bx, dst, val, bx.mul(size, count), align, volatile) + bx.call_memset(dst, val, bx.mul(size, count), align, volatile) } fn try_intrinsic( diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 64b5219530a32..737b06b5d2161 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -349,7 +349,7 @@ impl OperandValue<&'ll Value> { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); - base::call_memcpy(bx, lldst, llptr, llsize, min_align, flags); + bx.call_memcpy(lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. 
let indirect_operand = OperandValue::Pair(lldst, llextra); diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 28a60ac50c721..e94470c20c835 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -384,7 +384,7 @@ impl PlaceRef<'tcx, &'ll Value> { let (size, align) = self.layout.size_and_align(); let size = bx.cx().const_usize(size.bytes()); let align = bx.cx().const_u32(align.abi() as u32); - base::call_memset(bx, llptr, fill_byte, size, align, false); + bx.call_memset(llptr, fill_byte, size, align, false); } let niche = self.project_field(bx, 0); diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index a10a533fb836a..1005f0d03dcac 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -110,14 +110,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Use llvm.memset.p0i8.* to initialize all zero arrays if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { let fill = bx.cx().const_u8(0); - base::call_memset(&bx, start, fill, size, align, false); + bx.call_memset(start, fill, size, align, false); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = base::from_immediate(&bx, v); if bx.cx().val_ty(v) == bx.cx().type_i8() { - base::call_memset(&bx, start, v, size, align, false); + bx.call_memset(start, v, size, align, false); return bx; } } From ff464766af946b42f31eb3bf80154c1c821f6dbc Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 11 Sep 2018 11:46:03 +0200 Subject: [PATCH 43/76] Traitified IntrinsicCallMethods --- src/librustc_codegen_llvm/attributes.rs | 2 +- src/librustc_codegen_llvm/base.rs | 6 +- src/librustc_codegen_llvm/builder.rs | 10 +- src/librustc_codegen_llvm/consts.rs | 4 +- src/librustc_codegen_llvm/context.rs | 9 +- .../interfaces/builder.rs | 361 +++--- .../interfaces/intrinsic.rs | 20 +- src/librustc_codegen_llvm/interfaces/mod.rs 
| 4 +- src/librustc_codegen_llvm/intrinsic.rs | 1122 +++++++++-------- src/librustc_codegen_llvm/mir/block.rs | 8 +- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- 13 files changed, 790 insertions(+), 762 deletions(-) diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index fdd252b92cc16..c85cc8b0c8f7f 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -33,7 +33,7 @@ use value::Value; /// Mark LLVM function to use provided inline heuristic. #[inline] -pub fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) { +pub fn inline(cx: &CodegenCx<'ll, '_, &'ll Value>, val: &'ll Value, inline: InlineAttr) { use self::InlineAttr::*; match inline { Hint => Attribute::InlineHint.apply_llfn(Function, val), diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 36964ddc2009a..0e269c62fb93b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -73,7 +73,7 @@ use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::sync::Lrc; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, Backend, DerivedIntrinsicMethods}; +use interfaces::*; use std::any::Any; use std::ffi::CString; @@ -409,7 +409,7 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll , } } -pub fn to_immediate<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx>>( +pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, val: ::Value, layout: layout::TyLayout, @@ -420,7 +420,7 @@ pub fn to_immediate<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx>>( val } -pub fn to_immediate_scalar<'a, 'll, 'tcx, Builder : BuilderMethods<'a, 'll, 'tcx>>( +pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Builder : 
BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, val: ::Value, scalar: &layout::Scalar, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index fdb4bf03e1aaf..f19cad76f6732 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -19,7 +19,7 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods}; +use interfaces::*; use syntax; use std::borrow::Cow; @@ -56,10 +56,12 @@ bitflags! { } } -impl BuilderMethods<'a, 'll, 'tcx> - for Builder<'a, 'll, 'tcx, &'ll Value> { - +impl<'a, 'll: 'a, 'tcx: 'll> HasCodegen<'a> for Builder<'a, 'll, 'tcx, &'ll Value> { type CodegenCx = CodegenCx<'ll, 'tcx, &'ll Value>; +} + +impl<'a, 'll: 'a, 'tcx: 'll> BuilderMethods<'a, 'll, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll Value> { fn new_block<'b>( cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 6fbb21e8412bc..0e6a5c8b8c3cd 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -274,8 +274,8 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { // This item is external but not foreign, i.e. it originates from an external Rust // crate. Since we don't know whether this crate will be linked dynamically or // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to - // make things work. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. 
// // However, in some scenarios we defer emission of statics to downstream // crates, so there are cases where a static with an upstream DefId diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index ef8810d97a621..0fc71f7c26397 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -23,8 +23,7 @@ use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; -use interfaces::{BaseTypeMethods, DerivedTypeMethods, - IntrinsicMethods, BaseIntrinsicMethods, DerivedIntrinsicMethods}; +use interfaces::{BaseTypeMethods, DerivedTypeMethods, IntrinsicDeclarationMethods}; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -324,9 +323,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } } -impl BaseIntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> {} - -impl DerivedIntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> { +impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { fn get_intrinsic(&self, key: &str) -> &'b Value { if let Some(v) = self.intrinsics.borrow().get(key).cloned() { return v; @@ -642,8 +639,6 @@ impl DerivedIntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> { } } -impl IntrinsicMethods for CodegenCx<'b, 'tcx, &'b Value> {} - impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { /// Generate a new symbol name with the given prefix. This symbol name must /// only be used for definitions with `internal` or `private` linkage. 
diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 9a68596b54cae..b7607da785b40 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -17,18 +17,17 @@ use builder::MemFlags; use super::backend::Backend; use super::type_::TypeMethods; use super::consts::ConstMethods; -use super::intrinsic::IntrinsicMethods; +use super::intrinsic::IntrinsicDeclarationMethods; use std::borrow::Cow; use std::ops::Range; use syntax::ast::AsmDialect; +pub trait HasCodegen<'a> { + type CodegenCx : 'a + Backend + TypeMethods + ConstMethods + IntrinsicDeclarationMethods; +} -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { - - - type CodegenCx : 'a + Backend + TypeMethods + ConstMethods + IntrinsicMethods; - +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a> { fn new_block<'b>( cx: &'a Self::CodegenCx, llfn: ::Value, @@ -76,125 +75,125 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { rhs: ::Value ) -> ::Value; fn fadd( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fadd_fast( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn sub( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fsub( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fsub_fast( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn mul( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fmul( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fmul_fast( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: 
::Value, + rhs: ::Value + ) -> ::Value; fn udiv( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn exactudiv( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn sdiv( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn exactsdiv( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fdiv( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn fdiv_fast( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn urem( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn srem( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn frem( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn frem_fast( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn shl( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn lshr( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn ashr( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn and( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn or( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn xor( - &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + &self, + lhs: ::Value, + rhs: ::Value + ) -> ::Value; fn 
neg(&self, v: ::Value) -> ::Value; fn fneg(&self, v: ::Value) -> ::Value; fn not(&self, v: ::Value) -> ::Value; @@ -257,107 +256,107 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { ) -> ::Value; fn gep( - &self, - ptr: ::Value, - indices: &[::Value] - ) -> ::Value; + &self, + ptr: ::Value, + indices: &[::Value] + ) -> ::Value; fn inbounds_gep( - &self, - ptr: ::Value, - indices: &[::Value] - ) -> ::Value; + &self, + ptr: ::Value, + indices: &[::Value] + ) -> ::Value; fn struct_gep( - &self, - ptr: ::Value, - idx: u64 - ) -> ::Value; + &self, + ptr: ::Value, + idx: u64 + ) -> ::Value; fn trunc( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn sext( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn fptoui( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn fptosi( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn uitofp( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn sitofp( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn fptrunc( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn fpext( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn ptrtoint( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn inttoptr( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn bitcast( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: 
::Type + ) -> ::Value; fn intcast( - &self, - val: ::Value, - dest_ty: ::Type, is_signed: bool - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type, is_signed: bool + ) -> ::Value; fn pointercast( - &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + &self, + val: ::Value, + dest_ty: ::Type + ) -> ::Value; fn icmp( - &self, - op: IntPredicate, - lhs: ::Value, rhs: ::Value - ) -> ::Value; + &self, + op: IntPredicate, + lhs: ::Value, rhs: ::Value + ) -> ::Value; fn fcmp( - &self, - op: RealPredicate, - lhs: ::Value, rhs: ::Value - ) -> ::Value; + &self, + op: RealPredicate, + lhs: ::Value, rhs: ::Value + ) -> ::Value; fn empty_phi( - &self, - ty: ::Type) -> ::Value; + &self, + ty: ::Type) -> ::Value; fn phi( &self, - ty: ::Type, + ty: ::Type, vals: &[::Value], bbs: &[::BasicBlock] - ) -> ::Value; + ) -> ::Value; fn inline_asm_call( &self, asm: *const c_char, @@ -575,7 +574,11 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> { fn lifetime_start(&self, ptr: ::Value, size: Size); fn lifetime_end(&self, ptr: ::Value, size: Size); - fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ::Value, size: Size); + fn call_lifetime_intrinsic( + &self, + intrinsic: &str, + ptr: ::Value, size: Size + ); fn call( &self, diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs index cc50f815ffca6..f74beb4070888 100644 --- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -9,17 +9,27 @@ // except according to those terms. 
use super::backend::Backend; +use super::builder::HasCodegen; +use mir::operand::OperandRef; +use rustc::ty::Ty; +use abi::FnType; +use syntax_pos::Span; -pub trait BaseIntrinsicMethods : Backend { - +pub trait IntrinsicCallMethods<'a, 'tcx: 'a> : HasCodegen<'a> { + fn codegen_intrinsic_call( + &self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, ::Value>], + llresult: ::Value, + span: Span, + ); } -pub trait DerivedIntrinsicMethods : Backend { +pub trait IntrinsicDeclarationMethods : Backend { fn get_intrinsic(&self, key: &str) -> Self::Value; fn declare_intrinsic( &self, key: &str ) -> Option; } - -pub trait IntrinsicMethods : BaseIntrinsicMethods + DerivedIntrinsicMethods {} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 24cae1e941e5c..9f963f63383bf 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -15,9 +15,9 @@ mod type_; mod intrinsic; mod statics; -pub use self::builder::BuilderMethods; +pub use self::builder::{BuilderMethods, HasCodegen}; pub use self::backend::Backend; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods}; -pub use self::intrinsic::{IntrinsicMethods, BaseIntrinsicMethods, DerivedIntrinsicMethods}; +pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; pub use self::statics::StaticMethods; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 02a0be4d2fe94..32f406e8c2083 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -31,7 +31,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; +use interfaces::*; use rustc::session::Session; use syntax_pos::Span; @@ -86,610 +86,630 
@@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Opti Some(cx.get_intrinsic(&llvm_name)) } -/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, -/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, -/// add them to librustc_codegen_llvm/context.rs -pub fn codegen_intrinsic_call( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - callee_ty: Ty<'tcx>, - fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'tcx, &'ll Value>], - llresult: &'ll Value, - span: Span, -) { - let cx = bx.cx(); - let tcx = cx.tcx; +impl<'a, 'll: 'a, 'tcx: 'll> IntrinsicCallMethods<'a, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll Value> { - let (def_id, substs) = match callee_ty.sty { - ty::FnDef(def_id, substs) => (def_id, substs), - _ => bug!("expected fn item type, found {}", callee_ty) - }; + /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, + /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, + /// add them to librustc_codegen_llvm/context.rs + fn codegen_intrinsic_call( + &self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, &'ll Value>], + llresult: &'ll Value, + span: Span, + ) { + let cx = &self.cx(); + let tcx = cx.tcx; - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = &*tcx.item_name(def_id).as_str(); - - let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); - let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); - - let simple = get_simple_intrinsic(cx, name); - let llval = match name { - _ if simple.is_some() => { - bx.call(simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::>(), - None) - } - "unreachable" => { - return; - }, - "likely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), 
bx.cx().const_bool(true)], None) - } - "unlikely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None) - } - "try" => { - try_intrinsic(bx, cx, - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - llresult); - return; - } - "breakpoint" => { - let llfn = cx.get_intrinsic(&("llvm.debugtrap")); - bx.call(llfn, &[], None) - } - "size_of" => { - let tp_ty = substs.type_at(0); - cx.const_usize(cx.size_of(tp_ty).bytes()) - } - "size_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llsize - } else { + let sig = callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = &*tcx.item_name(def_id).as_str(); + + let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + + let simple = get_simple_intrinsic(cx, name); + let llval = match name { + _ if simple.is_some() => { + bx.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) + } + "unreachable" => { + return; + }, + "likely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(true)], None) + } + "unlikely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None) + } + "try" => { + try_intrinsic(bx, cx, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; + } + "breakpoint" => { + let llfn = cx.get_intrinsic(&("llvm.debugtrap")); + bx.call(llfn, &[], None) + } + "size_of" => { + let tp_ty = substs.type_at(0); cx.const_usize(cx.size_of(tp_ty).bytes()) } - } - "min_align_of" => { - let tp_ty = 
substs.type_at(0); - cx.const_usize(cx.align_of(tp_ty).abi()) - } - "min_align_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llalign - } else { + "size_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = + glue::size_and_align_of_dst(&self, tp_ty, Some(meta)); + llsize + } else { + cx.const_usize(cx.size_of(tp_ty).bytes()) + } + } + "min_align_of" => { + let tp_ty = substs.type_at(0); cx.const_usize(cx.align_of(tp_ty).abi()) } - } - "pref_align_of" => { - let tp_ty = substs.type_at(0); - cx.const_usize(cx.align_of(tp_ty).pref()) - } - "type_name" => { - let tp_ty = substs.type_at(0); - let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - cx.const_str_slice(ty_name) - } - "type_id" => { - cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0))) - } - "init" => { - let ty = substs.type_at(0); - if !cx.layout_of(ty).is_zst() { - // Just zero out the stack slot. - // If we store a zero constant, LLVM will drown in vreg allocation for large data - // structures, and the generated code will be awful. (A telltale sign of this is - // large quantities of `mov [byte ptr foo],0` in the generated code.) 
- memset_intrinsic( - bx, - false, - ty, - llresult, - cx.const_u8(0), - cx.const_usize(1) - ); + "min_align_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = + glue::size_and_align_of_dst(&self, tp_ty, Some(meta)); + llalign + } else { + cx.const_usize(cx.align_of(tp_ty).abi()) + } } - return; - } - // Effectively no-ops - "uninit" => { - return; - } - "needs_drop" => { - let tp_ty = substs.type_at(0); - - cx.const_bool(bx.cx().type_needs_drop(tp_ty)) - } - "offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.inbounds_gep(ptr, &[offset]) - } - "arith_offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.gep(ptr, &[offset]) - } - - "copy_nonoverlapping" => { - copy_intrinsic(bx, false, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "copy" => { - copy_intrinsic(bx, true, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "write_bytes" => { - memset_intrinsic(bx, false, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - - "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bx, false, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_copy_memory" => { - copy_intrinsic(bx, true, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_set_memory" => { - memset_intrinsic(bx, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_load" | "unaligned_volatile_load" => { - let tp_ty = substs.type_at(0); - let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, bx.cx().type_ptr_to(ty.llvm_type(cx))); + "pref_align_of" => { + let tp_ty = substs.type_at(0); + 
cx.const_usize(cx.align_of(tp_ty).pref()) } - let load = bx.volatile_load(ptr); - let align = if name == "unaligned_volatile_load" { - 1 - } else { - cx.align_of(tp_ty).abi() as u32 - }; - unsafe { - llvm::LLVMSetAlignment(load, align); + "type_name" => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + cx.const_str_slice(ty_name) } - to_immediate(bx, load, cx.layout_of(tp_ty)) - }, - "volatile_store" => { - let dst = args[0].deref(bx.cx()); - args[1].val.volatile_store(bx, dst); - return; - }, - "unaligned_volatile_store" => { - let dst = args[0].deref(bx.cx()); - args[1].val.unaligned_volatile_store(bx, dst); - return; - }, - "prefetch_read_data" | "prefetch_write_data" | - "prefetch_read_instruction" | "prefetch_write_instruction" => { - let expect = cx.get_intrinsic(&("llvm.prefetch")); - let (rw, cache_type) = match name { - "prefetch_read_data" => (0, 1), - "prefetch_write_data" => (1, 1), - "prefetch_read_instruction" => (0, 0), - "prefetch_write_instruction" => (1, 0), - _ => bug!() - }; - bx.call(expect, &[ - args[0].immediate(), - cx.const_i32(rw), - args[1].immediate(), - cx.const_i32(cache_type) - ], None) - }, - "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | - "bitreverse" | "add_with_overflow" | "sub_with_overflow" | - "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | - "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, cx) { - Some((width, signed)) => - match name { - "ctlz" | "cttz" => { - let y = cx.const_bool(false); - let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bx.call(llfn, &[args[0].immediate(), y], None) - } - "ctlz_nonzero" | "cttz_nonzero" => { - let y = cx.const_bool(true); - let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); - let llfn = cx.get_intrinsic(llvm_name); - bx.call(llfn, &[args[0].immediate(), y], 
None) - } - "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &[args[0].immediate()], None), - "bswap" => { - if width == 8 { - args[0].immediate() // byte swap a u8/i8 is just a no-op - } else { - bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &[args[0].immediate()], None) - } - } - "bitreverse" => { - bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), - &[args[0].immediate()], None) - } - "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let intrinsic = format!("llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name[..3], width); - let llfn = bx.cx().get_intrinsic(&intrinsic); - - // Convert `i1` to a `bool`, and write it to the out parameter - let pair = bx.call(llfn, &[ - args[0].immediate(), - args[1].immediate() - ], None); - let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), cx.type_bool()); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(overflow, dest.llval, dest.align); - - return; - }, - "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()), - "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()), - "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()), - "exact_div" => - if signed { - bx.exactsdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.exactudiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_div" => - if signed { - bx.sdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.udiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_rem" => - if signed { - bx.srem(args[0].immediate(), args[1].immediate()) - } else { - bx.urem(args[0].immediate(), args[1].immediate()) - }, - "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()), - "unchecked_shr" => - if signed { - bx.ashr(args[0].immediate(), args[1].immediate()) - } else { - 
bx.lshr(args[0].immediate(), args[1].immediate()) - }, - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - return; - } + "type_id" => { + cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0))) } - }, - "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { - let sty = &arg_tys[0].sty; - match float_type_width(sty) { - Some(_width) => - match name { - "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()), - "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()), - "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()), - "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), - "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()), - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic float type, found `{}`", name, sty)); - return; + "init" => { + let ty = substs.type_at(0); + if !cx.layout_of(ty).is_zst() { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large + // data structures, and the generated code will be awful. (A telltale sign of + // this is large quantities of `mov [byte ptr foo],0` in the generated code.) 
+ memset_intrinsic( + &self, + false, + ty, + llresult, + cx.const_u8(0), + cx.const_usize(1) + ); } + return; } + // Effectively no-ops + "uninit" => { + return; + } + "needs_drop" => { + let tp_ty = substs.type_at(0); - }, - - "discriminant_value" => { - args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty) - } - - name if name.starts_with("simd_") => { - match generic_simd_intrinsic(bx, name, - callee_ty, - args, - ret_ty, llret_ty, - span) { - Ok(llval) => llval, - Err(()) => return + cx.const_bool(cx.type_needs_drop(tp_ty)) + } + "offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + &self.inbounds_gep(ptr, &[offset]) + } + "arith_offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + &self.gep(ptr, &[offset]) } - } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_[_]", and no ordering means SeqCst - name if name.starts_with("atomic_") => { - use self::AtomicOrdering::*; - - let split: Vec<&str> = name.split('_').collect(); - - let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; - let (order, failorder) = match split.len() { - 2 => (SequentiallyConsistent, SequentiallyConsistent), - 3 => match split[2] { - "unordered" => (Unordered, Unordered), - "relaxed" => (Monotonic, Monotonic), - "acq" => (Acquire, Acquire), - "rel" => (Release, Monotonic), - "acqrel" => (AcquireRelease, Acquire), - "failrelaxed" if is_cxchg => - (SequentiallyConsistent, Monotonic), - "failacq" if is_cxchg => - (SequentiallyConsistent, Acquire), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - 4 => match (split[2], split[3]) { - ("acq", "failrelaxed") if is_cxchg => - (Acquire, Monotonic), - ("acqrel", "failrelaxed") if is_cxchg => - (AcquireRelease, Monotonic), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - _ => cx.sess().fatal("Atomic intrinsic not in correct format"), - }; - let invalid_monomorphization = |ty| { - 
span_invalid_monomorphization_error(tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - }; + "copy_nonoverlapping" => { + copy_intrinsic(&self, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "copy" => { + copy_intrinsic(&self, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "write_bytes" => { + memset_intrinsic(&self, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } - match split[1] { - "cxchg" | "cxchgweak" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let weak = split[1] == "cxchgweak"; - let pair = bx.atomic_cmpxchg( - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - order, - failorder, - weak); - let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), bx.cx().type_bool()); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(success, dest.llval, dest.align); - return; - } else { - return invalid_monomorphization(ty); - } + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(&self, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_copy_memory" => { + copy_intrinsic(&self, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_set_memory" => { + memset_intrinsic(&self, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_load" | "unaligned_volatile_load" => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = &self.pointercast(ptr, cx.type_ptr_to(ty.llvm_type(cx))); } - - "load" => { - let ty = 
substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_load(args[0].immediate(), order, align) - } else { - return invalid_monomorphization(ty); + let load = &self.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + cx.align_of(tp_ty).abi() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + } + to_immediate(self, load, cx.layout_of(tp_ty)) + }, + "volatile_store" => { + let dst = args[0].deref(cx); + args[1].val.volatile_store(&self, dst); + return; + }, + "unaligned_volatile_store" => { + let dst = args[0].deref(cx); + args[1].val.unaligned_volatile_store(&self, dst); + return; + }, + "prefetch_read_data" | "prefetch_write_data" | + "prefetch_read_instruction" | "prefetch_write_instruction" => { + let expect = cx.get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + "prefetch_read_data" => (0, 1), + "prefetch_write_data" => (1, 1), + "prefetch_read_instruction" => (0, 0), + "prefetch_write_instruction" => (1, 0), + _ => bug!() + }; + &self.call(expect, &[ + args[0].immediate(), + cx.const_i32(rw), + args[1].immediate(), + cx.const_i32(cache_type) + ], None) + }, + "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | + "bitreverse" | "add_with_overflow" | "sub_with_overflow" | + "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, cx) { + Some((width, signed)) => + match name { + "ctlz" | "cttz" => { + let y = cx.const_bool(false); + let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctlz_nonzero" | "cttz_nonzero" => { + let y = cx.const_bool(true); + let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); + let llfn = cx.get_intrinsic(llvm_name); + self.call(llfn, 
&[args[0].immediate(), y], None) + } + "ctpop" => self.call( + cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &[args[0].immediate()], + None + ), + "bswap" => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + self.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), + &[args[0].immediate()], None) + } + } + "bitreverse" => { + self.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), + &[args[0].immediate()], None) + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + let llfn = cx.get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let pair = &self.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], None); + let val = &self.extract_value(pair, 0); + let overflow = &self.zext( + &self.extract_value(pair, 1), + cx.type_bool() + ); + + let dest = result.project_field(&self, 0); + &self.store(val, dest.llval, dest.align); + let dest = result.project_field(&self, 1); + &self.store(overflow, dest.llval, dest.align); + + return; + }, + "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()), + "exact_div" => + if signed { + self.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + self.exactudiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_div" => + if signed { + self.sdiv(args[0].immediate(), args[1].immediate()) + } else { + self.udiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_rem" => + if signed { + self.srem(args[0].immediate(), args[1].immediate()) + } else { + self.urem(args[0].immediate(), args[1].immediate()) + }, + "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()), + "unchecked_shr" 
=> + if signed { + self.ashr(args[0].immediate(), args[1].immediate()) + } else { + self.lshr(args[0].immediate(), args[1].immediate()) + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + return; } } - "store" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); + }, + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); return; - } else { - return invalid_monomorphization(ty); } } - "fence" => { - bx.atomic_fence(order, SynchronizationScope::CrossThread); - return; - } + }, - "singlethreadfence" => { - bx.atomic_fence(order, SynchronizationScope::SingleThread); - return; - } + "discriminant_value" => { + args[0].deref(cx).codegen_get_discr(&self, ret_ty) + } - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => AtomicRmwBinOp::AtomicXchg, - "xadd" => AtomicRmwBinOp::AtomicAdd, - "xsub" => AtomicRmwBinOp::AtomicSub, - "and" => AtomicRmwBinOp::AtomicAnd, - "nand" => AtomicRmwBinOp::AtomicNand, - "or" => AtomicRmwBinOp::AtomicOr, - "xor" => AtomicRmwBinOp::AtomicXor, - "max" 
=> AtomicRmwBinOp::AtomicMax, - "min" => AtomicRmwBinOp::AtomicMin, - "umax" => AtomicRmwBinOp::AtomicUMax, - "umin" => AtomicRmwBinOp::AtomicUMin, - _ => cx.sess().fatal("unknown atomic operation") - }; - - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) - } else { - return invalid_monomorphization(ty); - } + name if name.starts_with("simd_") => { + match generic_simd_intrinsic(&self, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return } } - } - - "nontemporal_store" => { - let dst = args[0].deref(bx.cx()); - args[1].val.nontemporal_store(bx, dst); - return; - } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + name if name.starts_with("atomic_") => { + use self::AtomicOrdering::*; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + _ => cx.sess().fatal("Atomic intrinsic not in correct format"), + }; - _ => { - let intr = Intrinsic::find(&name).unwrap_or_else(|| - bug!("unknown intrinsic '{}'", name)); + let invalid_monomorphization 
= |ty| { + span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + }; - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type( - cx: &CodegenCx<'ll, '_, &'ll Value>, - t: &intrinsics::Type - ) -> Vec<&'ll Type> { - use intrinsics::Type::*; - match *t { - Void => vec![cx.type_void()], - Integer(_signed, _width, llvm_width) => { - vec![cx.type_ix( llvm_width as u64)] + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let weak = split[1] == "cxchgweak"; + let pair = &self.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = &self.extract_value(pair, 0); + let success = &self.zext( + &self.extract_value(pair, 1), + &self.cx().type_bool() + ); + + let dest = result.project_field(&self, 0); + &self.store(val, dest.llval, dest.align); + let dest = result.project_field(&self, 1); + &self.store(success, dest.llval, dest.align); + return; + } else { + return invalid_monomorphization(ty); + } } - Float(x) => { - match x { - 32 => vec![cx.type_f32()], - 64 => vec![cx.type_f64()], - _ => bug!() + + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + self.atomic_load(args[0].immediate(), order, align) + } else { + return invalid_monomorphization(ty); } } - Pointer(ref t, ref llvm_elem, _const) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![cx.type_ptr_to(elem)] + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + self.atomic_store( + args[1].immediate(), + args[0].immediate(), + order, + align + ); + return; + } else { + return invalid_monomorphization(ty); + } } - Vector(ref t, ref 
llvm_elem, length) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![cx.type_vector(elem, length as u64)] + + "fence" => { + &self.atomic_fence(order, SynchronizationScope::CrossThread); + return; } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(cx, t))) - .collect::>(); - vec![cx.type_struct( &elems, false)] + + "singlethreadfence" => { + &self.atomic_fence(order, SynchronizationScope::SingleThread); + return; } - Aggregate(true, ref contents) => { - contents.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect() + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, + _ => cx.sess().fatal("unknown atomic operation") + }; + + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + self.atomic_rmw( + atom_op, + args[0].immediate(), + args[1].immediate(), + order + ) + } else { + return invalid_monomorphization(ty); + } } } } - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. - fn modify_as_needed( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - t: &intrinsics::Type, - arg: &OperandRef<'tcx, &'ll Value>, - ) -> Vec<&'ll Value> { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e. no - // destructors, and the contents are SIMD - // etc. 
- assert!(!bx.cx().type_needs_drop(arg.layout.ty)); - let (ptr, align) = match arg.val { - OperandValue::Ref(ptr, None, align) => (ptr, align), - _ => bug!() - }; - let arg = PlaceRef::new_sized(ptr, arg.layout, align); - (0..contents.len()).map(|i| { - arg.project_field(bx, i).load(bx).immediate() - }).collect() - } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); - vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); - vec![ - bx.bitcast(arg.immediate(), - bx.cx().type_vector(llvm_elem, length as u64)) - ] + "nontemporal_store" => { + let dst = args[0].deref(cx); + args[1].val.nontemporal_store(&self, dst); + return; + } + + _ => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type<'ll>( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: &intrinsics::Type + ) -> Vec<&'ll Type> { + use intrinsics::Type::*; + match *t { + Void => vec![cx.type_void()], + Integer(_signed, _width, llvm_width) => { + vec![cx.type_ix( llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![cx.type_f32()], + 64 => vec![cx.type_f64()], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_ptr_to(elem)] + } + Vector(ref t, ref llvm_elem, length) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_vector(elem, length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(cx, t))) + .collect::>(); + vec![cx.type_struct( &elems, false)] + } + Aggregate(true, ref contents) => { + contents.iter() + .flat_map(|t| 
ty_to_type(cx, t)) + .collect() + } } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. - vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] + } + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. + fn modify_as_needed<'a, 'll: 'a, 'tcx: 'll>( + bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + t: &intrinsics::Type, + arg: &OperandRef<'tcx, &'ll Value>, + ) -> Vec<&'ll Value> { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. + assert!(!bx.cx().type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, None, align) => (ptr, align), + _ => bug!() + }; + let arg = PlaceRef::new_sized(ptr, arg.layout, align); + (0..contents.len()).map(|i| { + arg.project_field(bx, i).load(bx).immediate() + }).collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![ + bx.bitcast(arg.immediate(), + bx.cx().type_vector(llvm_elem, length as u64)) + ] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. 
+ vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] + } + _ => vec![arg.immediate()], } - _ => vec![arg.immediate()], } - } - let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect::>(); + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect::>(); - let outputs = one(ty_to_type(cx, &intr.output)); + let outputs = one(ty_to_type(cx, &intr.output)); - let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(bx, t, arg) - }).collect(); - assert_eq!(inputs.len(), llargs.len()); + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(&self, t, arg) + }).collect(); + assert_eq!(inputs.len(), llargs.len()); - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(cx, - name, - cx.type_func(&inputs, outputs)); - bx.call(f, &llargs, None) - } - }; + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = declare::declare_cfn(cx, + name, + cx.type_func(&inputs, outputs)); + self.call(f, &llargs, None) + } + }; - match *intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); - for i in 0..elems.len() { - let dest = result.project_field(bx, i); - let val = bx.extract_value(val, i as u64); - bx.store(val, dest.llval, dest.align); + for i in 0..elems.len() { + let dest = result.project_field(&self, i); + let val = &self.extract_value(val, i as u64); + &self.store(val, dest.llval, dest.align); + } + return; } - return; + _ => val, } - _ => val, } - } - }; + }; - if !fn_ty.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx))); - 
bx.store(llval, ptr, result.align); - } else { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) - .val.store(bx, result); + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr = &self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx))); + &self.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout) + .val.store(&self, result); + } } } } diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 4ca5d55462f98..5c20693c21268 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -25,7 +25,7 @@ use type_of::LayoutLlvmExt; use type_::Type; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; +use interfaces::*; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -556,8 +556,6 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - use intrinsic::codegen_intrinsic_call; - let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { @@ -624,8 +622,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx); - codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, - terminator.source_info.span); + &bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 737b06b5d2161..6b760ba66bcc5 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -21,7 +21,7 @@ use value::Value; use type_of::LayoutLlvmExt; use glue; -use interfaces::{BuilderMethods, 
ConstMethods, BaseTypeMethods, DerivedIntrinsicMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods}; use std::fmt; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index e94470c20c835..537e8bb8327bf 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -21,7 +21,7 @@ use value::Value; use glue; use mir::constant::const_alloc_to_llvm; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods, StaticMethods}; +use interfaces::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 1005f0d03dcac..444a6bd83620f 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -25,7 +25,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedIntrinsicMethods}; +use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods}; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; From 9a8dfd73c44bb6557ab279ba48e43b9bd9f7ef98 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 13 Sep 2018 14:58:19 +0200 Subject: [PATCH 44/76] Generalized base::unsized_info --- src/librustc_codegen_llvm/base.rs | 21 ++-- src/librustc_codegen_llvm/builder.rs | 4 +- src/librustc_codegen_llvm/callee.rs | 15 +-- src/librustc_codegen_llvm/common.rs | 2 +- src/librustc_codegen_llvm/consts.rs | 4 +- src/librustc_codegen_llvm/context.rs | 15 ++- .../debuginfo/metadata.rs | 104 +++++++++--------- src/librustc_codegen_llvm/debuginfo/mod.rs | 1 - .../interfaces/builder.rs | 10 +- .../interfaces/debuginfo.rs | 20 ++++ .../interfaces/intrinsic.rs | 2 +- src/librustc_codegen_llvm/interfaces/misc.rs | 20 ++++ 
src/librustc_codegen_llvm/interfaces/mod.rs | 10 +- src/librustc_codegen_llvm/interfaces/type_.rs | 14 ++- src/librustc_codegen_llvm/intrinsic.rs | 5 +- src/librustc_codegen_llvm/meth.rs | 20 ++-- src/librustc_codegen_llvm/type_.rs | 25 ++++- 17 files changed, 190 insertions(+), 102 deletions(-) create mode 100644 src/librustc_codegen_llvm/interfaces/debuginfo.rs create mode 100644 src/librustc_codegen_llvm/interfaces/misc.rs diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 0e269c62fb93b..328776c656893 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -39,7 +39,7 @@ use rustc::middle::weak_lang_items; use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; use rustc::middle::cstore::{EncodedMetadata}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::ty::query::Providers; use rustc::middle::cstore::{self, LinkagePreference}; use rustc::middle::exported_symbols; @@ -56,7 +56,6 @@ use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; use common::{self, IntPredicate, RealPredicate, TypeKind}; -use consts; use context::CodegenCx; use debuginfo; use declare; @@ -188,16 +187,16 @@ pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Builder : BuilderMethods<'a, 'll /// The `old_info` argument is a bit funny. It is intended for use /// in an upcast, where the new vtable for an object will be derived /// from the old one. 
-pub fn unsized_info( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, +pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( + cx: &'a Cx, source: Ty<'tcx>, target: Ty<'tcx>, - old_info: Option<&'ll Value>, -) -> &'ll Value { - let (source, target) = cx.tcx.struct_lockstep_tails(source, target); + old_info: Option, +) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let (source, target) = cx.tcx().struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { (&ty::Array(_, len), &ty::Slice(_)) => { - cx.const_usize(len.unwrap_usize(cx.tcx)) + cx.const_usize(len.unwrap_usize(*cx.tcx())) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker @@ -206,10 +205,10 @@ pub fn unsized_info( old_info.expect("unsized_info: missing old info for trait upcast") } (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target)) + let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) .field(cx, abi::FAT_PTR_EXTRA); - consts::ptrcast(meth::get_vtable(cx, source, data.principal()), - vtable_ptr.llvm_type(cx)) + cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), + cx.backend_type(vtable_ptr)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index f19cad76f6732..0583771d9330a 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -56,11 +56,11 @@ bitflags! 
{ } } -impl<'a, 'll: 'a, 'tcx: 'll> HasCodegen<'a> for Builder<'a, 'll, 'tcx, &'ll Value> { +impl HasCodegen<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { type CodegenCx = CodegenCx<'ll, 'tcx, &'ll Value>; } -impl<'a, 'll: 'a, 'tcx: 'll> BuilderMethods<'a, 'll, 'tcx> +impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn new_block<'b>( diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index e46b1055affdd..beb0e01936998 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -22,7 +22,7 @@ use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::BaseTypeMethods; +use interfaces::*; use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; @@ -206,15 +206,16 @@ pub fn get_fn( llfn } -pub fn resolve_and_get_fn( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, +pub fn resolve_and_get_fn<'ll, 'tcx: 'll, + Cx : Backend + MiscMethods<'tcx> + TypeMethods<'ll, 'tcx> + >( + cx: &Cx, def_id: DefId, substs: &'tcx Substs<'tcx>, -) -> &'ll Value { - get_fn( - cx, +) -> Cx::Value { + cx.get_fn( ty::Instance::resolve( - cx.tcx, + *cx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 76765fad2c33f..00e08ec58cd3c 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -321,7 +321,7 @@ impl<'ll, 'tcx : 'll> ConstMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { let len = s.len(); let cs = consts::ptrcast(&self.const_cstr(s, false), - &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self))); + &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(self))); &self.const_fat_ptr(cs, &self.const_usize(len as u64)) } diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 
0e6a5c8b8c3cd..4c7632365ccbf 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -201,7 +201,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let g = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { - let llty = &self.layout_of(ty).llvm_type(&self); + let llty = &self.layout_of(ty).llvm_type(self); let (g, attrs) = match &self.tcx.hir.get(id) { Node::Item(&hir::Item { ref attrs, span, node: hir::ItemKind::Static(..), .. @@ -329,7 +329,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx); - let llty = self.layout_of(ty).llvm_type(&self); + let llty = self.layout_of(ty).llvm_type(self); let g = if val_llty == llty { g } else { diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 0fc71f7c26397..e8934e686004c 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -23,7 +23,7 @@ use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; -use interfaces::{BaseTypeMethods, DerivedTypeMethods, IntrinsicDeclarationMethods}; +use interfaces::*; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -323,6 +323,19 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { } } +impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn vtables(&self) -> &RefCell, + Option>), &'ll Value>> + { + &self.vtables + } + fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value { + callee::get_fn(&&self,instance) + } +} + +impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} + impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { fn get_intrinsic(&self, key: &str) -> &'b Value { if let Some(v) = self.intrinsics.borrow().get(key).cloned() { diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs 
b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 5629cf5a591d8..89e622efecc52 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -17,6 +17,7 @@ use super::utils::{debug_context, DIB, span_start, use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; +use interfaces::*; use abi; use value::Value; @@ -1744,58 +1745,61 @@ pub fn extend_scope_to_file( } } -/// Creates debug information for the given vtable, which is for the -/// given type. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_vtable_metadata( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - ty: ty::Ty<'tcx>, - vtable: &'ll Value, -) { - if cx.dbg_cx.is_none() { - return; - } - - let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); - - unsafe { - // LLVMRustDIBuilderCreateStructType() wants an empty array. A null - // pointer will lead to hard to trace and debug LLVM assertions - // later on in llvm/lib/IR/Value.cpp. - let empty_array = create_DIArray(DIB(cx), &[]); +impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - let name = const_cstr!("vtable"); + /// Creates debug information for the given vtable, which is for the + /// given type. + /// + /// Adds the created metadata nodes directly to the crate's IR. + fn create_vtable_metadata( + &self, + ty: ty::Ty<'tcx>, + vtable: &'ll Value, + ) { + if self.dbg_cx.is_none() { + return; + } - // Create a new one each time. We don't want metadata caching - // here, because each vtable will refer to a unique containing - // type. 
- let vtable_type = llvm::LLVMRustDIBuilderCreateStructType( - DIB(cx), - NO_SCOPE_METADATA, - name.as_ptr(), - unknown_file_metadata(cx), - UNKNOWN_LINE_NUMBER, - Size::ZERO.bits(), - cx.tcx.data_layout.pointer_align.abi_bits() as u32, - DIFlags::FlagArtificial, - None, - empty_array, - 0, - Some(type_metadata), - name.as_ptr() - ); + let type_metadata = type_metadata(&self, ty, syntax_pos::DUMMY_SP); - llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), - NO_SCOPE_METADATA, - name.as_ptr(), - ptr::null(), - unknown_file_metadata(cx), - UNKNOWN_LINE_NUMBER, - vtable_type, - true, - vtable, - None, - 0); + unsafe { + // LLVMRustDIBuilderCreateStructType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(&self), &[]); + + let name = const_cstr!("vtable"); + + // Create a new one each time. We don't want metadata caching + // here, because each vtable will refer to a unique containing + // type. 
+ let vtable_type = llvm::LLVMRustDIBuilderCreateStructType( + DIB(&self), + NO_SCOPE_METADATA, + name.as_ptr(), + unknown_file_metadata(&self), + UNKNOWN_LINE_NUMBER, + Size::ZERO.bits(), + self.tcx.data_layout.pointer_align.abi_bits() as u32, + DIFlags::FlagArtificial, + None, + empty_array, + 0, + Some(type_metadata), + name.as_ptr() + ); + + llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(&self), + NO_SCOPE_METADATA, + name.as_ptr(), + ptr::null(), + unknown_file_metadata(&self), + UNKNOWN_LINE_NUMBER, + vtable_type, + true, + vtable, + None, + 0); + } } } diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 50f1e52b5bd88..08212f28569b3 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -58,7 +58,6 @@ mod source_loc; pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; pub use self::source_loc::start_emitting_source_locations; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_vtable_metadata; pub use self::metadata::extend_scope_to_file; pub use self::source_loc::set_source_location; diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index b7607da785b40..e4613294e6bff 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -15,19 +15,17 @@ use rustc::ty::layout::{Align, Size}; use rustc::session::Session; use builder::MemFlags; use super::backend::Backend; -use super::type_::TypeMethods; -use super::consts::ConstMethods; -use super::intrinsic::IntrinsicDeclarationMethods; +use super::CodegenMethods; use std::borrow::Cow; use std::ops::Range; use syntax::ast::AsmDialect; -pub trait HasCodegen<'a> { - type CodegenCx : 'a + Backend + TypeMethods + ConstMethods + IntrinsicDeclarationMethods; +pub trait HasCodegen<'a, 'll: 'a, 'tcx :'ll> { + type CodegenCx : 'a + CodegenMethods<'ll, 'tcx>; 
} -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a> { +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn new_block<'b>( cx: &'a Self::CodegenCx, llfn: ::Value, diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs new file mode 100644 index 0000000000000..b0331ae11ef80 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -0,0 +1,20 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::Ty; +use super::backend::Backend; + +pub trait DebugInfoMethods<'tcx> : Backend { + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ); +} diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs index f74beb4070888..583cb8151a551 100644 --- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -15,7 +15,7 @@ use rustc::ty::Ty; use abi::FnType; use syntax_pos::Span; -pub trait IntrinsicCallMethods<'a, 'tcx: 'a> : HasCodegen<'a> { +pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn codegen_intrinsic_call( &self, callee_ty: Ty<'tcx>, diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs new file mode 100644 index 0000000000000..1897fee36e99d --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -0,0 +1,20 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cell::RefCell; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{Ty, self, Instance}; +use super::backend::Backend; + +pub trait MiscMethods<'tcx> : Backend { + fn vtables(&self) -> &RefCell, + Option>), Self::Value>>; + fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; +} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 9f963f63383bf..1988776661e19 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -14,10 +14,18 @@ mod consts; mod type_; mod intrinsic; mod statics; +mod misc; +mod debuginfo; pub use self::builder::{BuilderMethods, HasCodegen}; pub use self::backend::Backend; pub use self::consts::ConstMethods; -pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods}; +pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; pub use self::statics::StaticMethods; +pub use self::misc::MiscMethods; +pub use self::debuginfo::DebugInfoMethods; + +pub trait CodegenMethods<'ll, 'tcx: 'll> : + Backend + TypeMethods<'ll, 'tcx> + MiscMethods<'tcx> + ConstMethods + + StaticMethods<'tcx> + DebugInfoMethods<'tcx> {} diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 894bf22f445b7..fb3764daf7d11 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -12,8 +12,12 @@ use super::backend::Backend; use common::TypeKind; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; +use std::cell::RefCell; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::layout::TyLayout; -pub 
trait BaseTypeMethods : Backend { +pub trait BaseTypeMethods<'a, 'tcx: 'a> : Backend { fn type_void(&self) -> Self::Type; fn type_metadata(&self) -> Self::Type; fn type_i1(&self) -> Self::Type; @@ -43,6 +47,8 @@ pub trait BaseTypeMethods : Backend { fn int_width(&self, ty: Self::Type) -> u64; fn val_ty(&self, v: Self::Value) -> Self::Type; + fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; + fn tcx(&self) -> &TyCtxt<'a, 'tcx, 'tcx>; } pub trait DerivedTypeMethods : Backend { @@ -72,4 +78,8 @@ pub trait DerivedTypeMethods : Backend { ) -> Self::Type; } -pub trait TypeMethods : BaseTypeMethods + DerivedTypeMethods {} +pub trait LayoutTypeMethods<'tcx> : Backend { + fn backend_type(&self, ty: TyLayout<'tcx>) -> Self::Type; +} + +pub trait TypeMethods<'a, 'tcx: 'a> : BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods + LayoutTypeMethods<'tcx> {} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 32f406e8c2083..9dc1c96dd6c21 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -86,8 +86,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Opti Some(cx.get_intrinsic(&llvm_name)) } -impl<'a, 'll: 'a, 'tcx: 'll> IntrinsicCallMethods<'a, 'tcx> - for Builder<'a, 'll, 'tcx, &'ll Value> { +impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, @@ -100,7 +99,7 @@ impl<'a, 'll: 'a, 'tcx: 'll> IntrinsicCallMethods<'a, 'tcx> llresult: &'ll Value, span: Span, ) { - let cx = &self.cx(); + let cx = self.cx(); let tcx = cx.tcx; let sig = callee_ty.fn_sig(tcx); diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index a4bdc19bcf11a..3de839cc9705f 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -10,16 +10,14 @@ use 
abi::{FnType, FnTypeExt}; use callee; -use context::CodegenCx; use builder::Builder; use monomorphize; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods}; +use interfaces::*; use rustc::ty::{self, Ty}; -use rustc::ty::layout::HasDataLayout; -use debuginfo; +use rustc::ty::layout::{LayoutOf, HasDataLayout, HasTyCtxt, TyLayout}; #[derive(Copy, Clone, Debug)] pub struct VirtualIndex(u64); @@ -82,17 +80,17 @@ impl<'a, 'tcx> VirtualIndex { /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, +pub fn get_vtable<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>>( + cx: &'a Cx, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, -) -> &'ll Value { - let tcx = cx.tcx; +) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let tcx = cx.tcx(); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); // Check the cache. 
- if let Some(&val) = cx.vtables.borrow().get(&(ty, trait_ref)) { + if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) { return val; } @@ -121,8 +119,8 @@ pub fn get_vtable( let align = cx.data_layout().pointer_align; let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); - debuginfo::create_vtable_metadata(cx, ty, vtable); + cx.create_vtable_metadata(ty, vtable); - cx.vtables.borrow_mut().insert((ty, trait_ref), vtable); + cx.vtables().borrow_mut().insert((ty, trait_ref), vtable); vtable } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 7c05f631f3c4b..62280a2459f4b 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,15 +16,20 @@ use llvm; use llvm::{Bool, False, True}; use context::CodegenCx; use value::Value; -use interfaces::{BaseTypeMethods, DerivedTypeMethods, TypeMethods}; +use interfaces::*; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::layout::TyLayout; use rustc_data_structures::small_c_str::SmallCStr; use common::{self, TypeKind}; +use type_of::LayoutLlvmExt; use std::fmt; +use std::cell::RefCell; use libc::c_uint; @@ -42,7 +47,7 @@ impl fmt::Debug for Type { } } -impl BaseTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { +impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_void(&self) -> &'ll Type { unsafe { @@ -233,6 +238,14 @@ impl BaseTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } + + fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { + &self.scalar_lltypes + } + + fn tcx(&self) -> &TyCtxt<'ll, 'tcx, 'tcx> { + &self.tcx + } } impl Type { @@ -362,4 +375,10 @@ impl DerivedTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } -impl TypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> {} +impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll 
Value> { + fn backend_type(&self, ty: TyLayout<'tcx>) -> &'ll Type { + ty.llvm_type(&self) + } +} + +impl TypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} From 56f3990313eb4e537d4de74db4b856ab199ada16 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 13 Sep 2018 17:41:40 +0200 Subject: [PATCH 45/76] Generalized base::unsize_thin_ptr --- src/librustc_codegen_llvm/base.rs | 20 +++++----- src/librustc_codegen_llvm/context.rs | 26 ------------ src/librustc_codegen_llvm/glue.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 17 ++++++-- src/librustc_codegen_llvm/mir/analyze.rs | 1 + src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- src/librustc_codegen_llvm/type_.rs | 40 +++++++++++++++++-- 7 files changed, 65 insertions(+), 43 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 328776c656893..318b8d7804f74 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -208,7 +208,7 @@ pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) .field(cx, abi::FAT_PTR_EXTRA); cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), - cx.backend_type(vtable_ptr)) + cx.backend_type(&vtable_ptr)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, @@ -217,12 +217,14 @@ pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( } /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
-pub fn unsize_thin_ptr( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - src: &'ll Value, +pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + src: ::Value, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> -) -> (&'ll Value, &'ll Value) { +) -> (::Value, ::Value) where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); match (&src_ty.sty, &dst_ty.sty) { (&ty::Ref(_, a, _), @@ -232,13 +234,13 @@ pub fn unsize_thin_ptr( (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx())); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { @@ -263,8 +265,8 @@ pub fn unsize_thin_ptr( } let (lldata, llextra) = result.unwrap(); // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 0, true)), - bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 1, true))) + (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(&dst_layout, 0, true)), + bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(&dst_layout, 1, true))) } _ => bug!("unsize_thin_ptr: called on bad types"), } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index e8934e686004c..687b6d83c57ea 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -9,7 +9,6 @@ // except according to those terms. use attributes; -use common; use llvm; use rustc::dep_graph::DepGraphSafe; use rustc::hir; @@ -743,31 +742,6 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { llfn } - pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - common::type_needs_drop(self.tcx, ty) - } - - pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - common::type_is_sized(self.tcx, ty) - } - - pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - common::type_is_freeze(self.tcx, ty) - } - - pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - use syntax_pos::DUMMY_SP; - if ty.is_sized(self.tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) { - return false; - } - - let tail = self.tcx.struct_tail(ty); - match tail.sty { - ty::Foreign(..) => false, - ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), - } - } } impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index d15a182a313b2..1bf7702c1371f 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -20,7 +20,7 @@ use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -use interfaces::{BuilderMethods, ConstMethods}; +use interfaces::*; pub fn size_and_align_of_dst( bx: &Builder<'_, 'll, 'tcx, &'ll Value>, diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index fb3764daf7d11..20709f15b1f78 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -51,7 +51,7 @@ pub trait BaseTypeMethods<'a, 'tcx: 'a> : Backend { fn tcx(&self) -> &TyCtxt<'a, 'tcx, 'tcx>; } -pub trait DerivedTypeMethods : Backend { +pub trait DerivedTypeMethods<'tcx> : Backend { fn type_bool(&self) -> Self::Type; fn type_char(&self) -> Self::Type; fn type_i8p(&self) -> Self::Type; @@ -76,10 +76,21 @@ pub trait DerivedTypeMethods : Backend { size: Size, align: Align ) -> Self::Type; + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool; + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool; + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool; + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool; } pub trait LayoutTypeMethods<'tcx> : Backend { - fn backend_type(&self, ty: TyLayout<'tcx>) -> Self::Type; + fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn scalar_pair_element_backend_type<'a>( + &self, + ty: &TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> Self::Type; } -pub trait TypeMethods<'a, 'tcx: 'a> : BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods + LayoutTypeMethods<'tcx> {} +pub trait TypeMethods<'a, 'tcx: 'a> : BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods<'tcx> + 
LayoutTypeMethods<'tcx> {} diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs index 7cd360877c38c..ef361bf19c1c3 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -22,6 +22,7 @@ use rustc::ty::layout::LayoutOf; use type_of::LayoutLlvmExt; use super::FunctionCx; use value::Value; +use interfaces::*; pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet { let mir = fx.mir; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 444a6bd83620f..34120b9b59ae5 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -25,7 +25,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods}; +use interfaces::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 62280a2459f4b..5215bec4cc5f2 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -22,7 +22,7 @@ use interfaces::*; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use rustc::util::nodemap::FxHashMap; -use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::TyLayout; use rustc_data_structures::small_c_str::SmallCStr; use common::{self, TypeKind}; @@ -276,7 +276,7 @@ impl Type { } } -impl DerivedTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { +impl DerivedTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_bool(&self) -> &'ll Type { &self.type_i8() @@ -373,12 +373,46 @@ impl DerivedTypeMethods for CodegenCx<'ll, 'tcx, &'ll Value> { assert_eq!(size % unit_size, 0); &self.type_array(&self.type_from_integer(unit), size / unit_size) } + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + 
common::type_needs_drop(*self.tcx(), ty) + } + + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(*self.tcx(), ty) + } + + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(*self.tcx(), ty) + } + + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx().struct_tail(ty); + match tail.sty { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, + _ => bug!("unexpected unsized tail: {:?}", tail.sty), + } + } } impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - fn backend_type(&self, ty: TyLayout<'tcx>) -> &'ll Type { + fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { ty.llvm_type(&self) } + fn scalar_pair_element_backend_type<'a>( + &self, + ty: &TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> &'ll Type { + ty.scalar_pair_element_llvm_type(&self, index, immediate) + } } impl TypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} From 8b9ff39ee06f1ddd6443925b3b02d7d412af8265 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 14 Sep 2018 17:48:57 +0200 Subject: [PATCH 46/76] Generalized base::coerce_unsized_into --- src/librustc_codegen_llvm/asm.rs | 4 +- src/librustc_codegen_llvm/base.rs | 17 ++-- src/librustc_codegen_llvm/builder.rs | 74 ++++++++++++++- src/librustc_codegen_llvm/glue.rs | 20 ++-- .../interfaces/backend.rs | 6 +- .../interfaces/builder.rs | 6 ++ src/librustc_codegen_llvm/interfaces/mod.rs | 4 + src/librustc_codegen_llvm/interfaces/type_.rs | 5 +- src/librustc_codegen_llvm/intrinsic.rs | 18 ++-- src/librustc_codegen_llvm/meth.rs | 8 +- src/librustc_codegen_llvm/mir/block.rs | 12 +-- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 43 ++++++--- src/librustc_codegen_llvm/mir/place.rs | 92 ++++--------------- src/librustc_codegen_llvm/type_.rs | 8 ++ 
src/librustc_codegen_llvm/value.rs | 3 + 16 files changed, 189 insertions(+), 133 deletions(-) diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index cf45675b584ec..0092720fa0184 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -37,11 +37,11 @@ pub fn codegen_inline_asm( let mut indirect_outputs = vec![]; for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { if out.is_rw { - inputs.push(place.load(bx).immediate()); + inputs.push(bx.load_ref(place).immediate()); ext_constraints.push(i.to_string()); } if out.is_indirect { - indirect_outputs.push(place.load(bx).immediate()); + indirect_outputs.push(bx.load_ref(place).immediate()); } else { output_types.push(place.layout.llvm_type(bx.cx())); } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 318b8d7804f74..88367cf44e635 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -66,7 +66,7 @@ use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, Codege use rustc_codegen_utils::symbol_names_test; use time_graph; use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; -use type_of::LayoutLlvmExt; + use rustc::util::nodemap::{FxHashMap, DefIdSet}; use CrateInfo; use rustc_data_structures::small_c_str::SmallCStr; @@ -274,22 +274,23 @@ pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx> /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - src: PlaceRef<'tcx, &'ll Value>, - dst: PlaceRef<'tcx, &'ll Value> -) { +pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + src: PlaceRef<'tcx, ::Value>, + dst: PlaceRef<'tcx, ::Value> +) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let src_ty = 
src.layout.ty; let dst_ty = dst.layout.ty; let coerce_ptr = || { - let (base, info) = match src.load(bx).val { + let (base, info) = match bx.load_ref(&src).val { OperandValue::Pair(base, info) => { // fat-ptr to fat-ptr unsize preserves the vtable // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR); - (bx.pointercast(base, thin_ptr.llvm_type(bx.cx())), info) + (bx.pointercast(base, bx.cx().backend_type(&thin_ptr)), info) } OperandValue::Immediate(base) => { unsize_thin_ptr(bx, base, src_ty, dst_ty) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 0583771d9330a..46f7acbbcf836 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -13,15 +13,18 @@ use llvm::{self, False, OperandBundleDef, BasicBlock}; use common::{self, *}; use context::CodegenCx; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; -use rustc::ty::layout::{Align, Size}; +use rustc::ty::layout::{self, Align, Size}; use rustc::session::{config, Session}; use rustc_data_structures::small_c_str::SmallCStr; use interfaces::*; use syntax; - +use base; +use mir::operand::{OperandValue, OperandRef}; +use mir::place::PlaceRef; use std::borrow::Cow; use std::ops::Range; use std::ptr; @@ -533,6 +536,73 @@ impl BuilderMethods<'a, 'll, 'tcx> } } + fn load_ref( + &self, + ptr: &PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + debug!("PlaceRef::load: {:?}", ptr); + + assert_eq!(ptr.llextra.is_some(), ptr.layout.is_unsized()); + + if ptr.layout.is_zst() { + return OperandRef::new_zst(self.cx(), ptr.layout); + } + + let scalar_load_metadata = |load, scalar: &layout::Scalar| { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) 
=> { + let range = scalar.valid_range_exclusive(self.cx()); + if range.start != range.end { + &self.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + &self.nonnull_metadata(load); + } + _ => {} + } + }; + + let val = if let Some(llextra) = ptr.llextra { + OperandValue::Ref(ptr.llval, Some(llextra), ptr.align) + } else if ptr.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(ptr.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = &self.load(ptr.llval, ptr.align); + if let layout::Abi::Scalar(ref scalar) = ptr.layout.abi { + scalar_load_metadata(load, scalar); + } + load + }); + OperandValue::Immediate(base::to_immediate(self, llval, ptr.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = ptr.layout.abi { + let load = |i, scalar: &layout::Scalar| { + let llptr = self.struct_gep(ptr.llval, i as u64); + let load = self.load(llptr, ptr.align); + scalar_load_metadata(load, scalar); + if scalar.is_bool() { + self.trunc(load, self.cx().type_i1()) + } else { + load + } + }; + OperandValue::Pair(load(0, a), load(1, b)) + } else { + OperandValue::Ref(ptr.llval, None, ptr.align) + }; + + OperandRef { val, layout: ptr.layout } + } + + fn range_metadata(&self, load: &'ll Value, range: Range) { if self.sess().target.target.arch == "amdgpu" { diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 1bf7702c1371f..ea2be097bd782 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -14,23 +14,25 @@ use std; -use builder::Builder; use common::*; use meth; -use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::{self, Ty}; -use value::Value; use interfaces::*; -pub fn size_and_align_of_dst( - bx: 
&Builder<'_, 'll, 'tcx, &'ll Value>, +pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, + Bx: BuilderMethods<'a, 'll, 'tcx> + >( + bx: &Bx, t: Ty<'tcx>, - info: Option<&'ll Value> -) -> (&'ll Value, &'ll Value) { + info: Option<::Value> +) -> (::Value, ::Value) where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ debug!("calculate size of DST: {}; with lost info: {:?}", t, info); if bx.cx().type_is_sized(t) { - let (size, align) = bx.cx().size_and_align_of(t); + let (size, align) = bx.cx().layout_of(t).size_and_align(); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); let size = bx.cx().const_usize(size.bytes()); @@ -47,7 +49,7 @@ pub fn size_and_align_of_dst( let unit = t.sequence_element_type(bx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let (size, align) = bx.cx().size_and_align_of(unit); + let (size, align) = bx.cx().layout_of(unit).size_and_align(); (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())), bx.cx().const_usize(align.abi())) } diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 2b79e2522470c..27698e69ca0f7 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::fmt::Debug; +use super::CodegenObject; pub trait Backend { - type Value : Debug + PartialEq + Copy; + type Value : CodegenObject; type BasicBlock; - type Type : Debug + PartialEq + Copy; + type Type : CodegenObject; type Context; } diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index e4613294e6bff..ac055b36e1693 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -16,6 +16,8 @@ use rustc::session::Session; use builder::MemFlags; use super::backend::Backend; use super::CodegenMethods; +use mir::place::PlaceRef; +use mir::operand::OperandRef; use std::borrow::Cow; use std::ops::Range; @@ -228,6 +230,10 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { ptr: ::Value, order: AtomicOrdering, align: Align ) -> ::Value; + fn load_ref( + &self, + &PlaceRef<'tcx,::Value> + ) -> OperandRef<'tcx, ::Value>; fn range_metadata(&self, load: ::Value, range: Range); fn nonnull_metadata(&self, load: ::Value); diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 1988776661e19..91c82fce38e5f 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -26,6 +26,10 @@ pub use self::statics::StaticMethods; pub use self::misc::MiscMethods; pub use self::debuginfo::DebugInfoMethods; +use std::fmt; + pub trait CodegenMethods<'ll, 'tcx: 'll> : Backend + TypeMethods<'ll, 'tcx> + MiscMethods<'tcx> + ConstMethods + StaticMethods<'tcx> + DebugInfoMethods<'tcx> {} + +pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 20709f15b1f78..1deb25e0c156e 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -85,6 +85,8 @@ pub trait DerivedTypeMethods<'tcx> 
: Backend { pub trait LayoutTypeMethods<'tcx> : Backend { fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool; fn scalar_pair_element_backend_type<'a>( &self, ty: &TyLayout<'tcx>, @@ -93,4 +95,5 @@ pub trait LayoutTypeMethods<'tcx> : Backend { ) -> Self::Type; } -pub trait TypeMethods<'a, 'tcx: 'a> : BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} +pub trait TypeMethods<'a, 'tcx: 'a> : + BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 9dc1c96dd6c21..6adf03a855257 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -149,7 +149,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { let (llsize, _) = - glue::size_and_align_of_dst(&self, tp_ty, Some(meta)); + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llsize } else { cx.const_usize(cx.size_of(tp_ty).bytes()) @@ -163,7 +163,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { let (_, llalign) = - glue::size_and_align_of_dst(&self, tp_ty, Some(meta)); + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llalign } else { cx.const_usize(cx.align_of(tp_ty).abi()) @@ -341,9 +341,9 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> cx.type_bool() ); - let dest = result.project_field(&self, 0); + let dest = result.project_field(self, 0); &self.store(val, dest.llval, dest.align); - let dest = result.project_field(&self, 1); + let dest = result.project_field(self, 1); &self.store(overflow, dest.llval, dest.align); 
return; @@ -481,9 +481,9 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> &self.cx().type_bool() ); - let dest = result.project_field(&self, 0); + let dest = result.project_field(self, 0); &self.store(val, dest.llval, dest.align); - let dest = result.project_field(&self, 1); + let dest = result.project_field(self, 1); &self.store(success, dest.llval, dest.align); return; } else { @@ -639,7 +639,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> }; let arg = PlaceRef::new_sized(ptr, arg.layout, align); (0..contents.len()).map(|i| { - arg.project_field(bx, i).load(bx).immediate() + bx.load_ref(&arg.project_field(bx, i)).immediate() }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { @@ -690,7 +690,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> assert!(!flatten); for i in 0..elems.len() { - let dest = result.project_field(&self, i); + let dest = result.project_field(self, i); let val = &self.extract_value(val, i as u64); &self.store(val, dest.llval, dest.align); } @@ -707,7 +707,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> &self.store(llval, ptr, result.align); } else { OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout) - .val.store(&self, result); + .val.store(self, result); } } } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 3de839cc9705f..e7c1c2de155ea 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -52,11 +52,11 @@ impl<'a, 'tcx> VirtualIndex { ptr } - pub fn get_usize( + pub fn get_usize>( self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - llvtable: &'ll Value - ) -> &'ll Value { + bx: &Bx, + llvtable: ::Value + ) -> ::Value { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 5c20693c21268..a5ccb3181bc4e 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -165,8 +165,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.cleanup_ret(cleanup_pad, None); } else { let slot = self.get_personality_slot(&bx); - let lp0 = slot.project_field(&bx, 0).load(&bx).immediate(); - let lp1 = slot.project_field(&bx, 1).load(&bx).immediate(); + let lp0 = &bx.load_ref(&slot.project_field(&bx, 0)).immediate(); + let lp1 = &bx.load_ref(&slot.project_field(&bx, 1)).immediate(); slot.storage_dead(&bx); if !bx.sess().target.target.options.custom_unwind_resume { @@ -793,7 +793,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); - self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]); + self.codegen_argument(bx, bx.load_ref(&field_ptr), llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") @@ -952,7 +952,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); - let op = place.load(bx); + let op = bx.load_ref(&place); place.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -990,7 +990,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { Nothing => (), Store(dst) => ret_ty.store(bx, llval, dst), IndirectOperand(tmp, index) => { - let op = tmp.load(bx); + let op = bx.load_ref(&tmp); tmp.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -1000,7 +1000,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); ret_ty.store(bx, 
llval, tmp); - let op = tmp.load(bx); + let op = bx.load_ref(&tmp); tmp.storage_dead(bx); op } else { diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index e4bd8f7450ba5..5cc8cb41bee88 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -566,7 +566,7 @@ fn arg_local_refs( let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); - indirect_operand.store(&bx, tmp); + indirect_operand.store(bx, tmp); tmp } else { let tmp = PlaceRef::alloca(bx, arg.layout, &name); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 6b760ba66bcc5..4aa7427b667a7 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -21,7 +21,7 @@ use value::Value; use type_of::LayoutLlvmExt; use glue; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods}; +use interfaces::*; use std::fmt; @@ -68,16 +68,20 @@ impl fmt::Debug for OperandRef<'tcx, &'ll Value> { } } -impl OperandRef<'tcx, &'ll Value> { - pub fn new_zst(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> { +impl<'ll, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { + pub fn new_zst>( + cx: &Cx, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, V> where Cx : Backend { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(cx.const_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(&layout))), layout } } +} +impl OperandRef<'tcx, &'ll Value> { pub fn from_const(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, val: &'tcx ty::Const<'tcx>) -> Result, Lrc>> { @@ -123,7 +127,7 @@ impl OperandRef<'tcx, &'ll Value> { OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(_, alloc, offset) => { - return Ok(PlaceRef::from_const_alloc(bx, layout, 
alloc, offset).load(bx)); + return Ok(bx.load_ref(&PlaceRef::from_const_alloc(bx, layout, alloc, offset))); }, }; @@ -256,10 +260,17 @@ impl OperandRef<'tcx, &'ll Value> { } } -impl OperandValue<&'ll Value> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, dest: PlaceRef<'tcx, &'ll Value>) { +impl OperandValue { + pub fn store<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + self, + bx: &Bx, + dest: PlaceRef<'tcx, ::Value> + ) where Bx::CodegenCx : Backend { self.store_with_flags(bx, dest, MemFlags::empty()); } +} + +impl OperandValue<&'ll Value> { pub fn volatile_store( self, @@ -286,13 +297,15 @@ impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> { ) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } +} - fn store_with_flags( +impl OperandValue { + fn store_with_flags<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: PlaceRef<'tcx, &'ll Value>, + bx: &Bx, + dest: PlaceRef<'tcx, ::Value>, flags: MemFlags, - ) { + ) where Bx::CodegenCx : Backend { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. @@ -427,7 +440,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // for most places, to consume them we just load them // out from their home - self.codegen_place(bx, place).load(bx) + bx.load_ref(&self.codegen_place(bx, place)) } pub fn codegen_operand(&mut self, @@ -458,11 +471,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.call(fnname, &[], None); // We've errored, so we don't have to produce working code. 
let layout = bx.cx().layout_of(ty); - PlaceRef::new_sized( + bx.load_ref(&PlaceRef::new_sized( bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))), layout, layout.align, - ).load(bx) + )) }) } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 537e8bb8327bf..488ce90f17f35 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -8,12 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, LLVMConstInBoundsGEP}; +use llvm::LLVMConstInBoundsGEP; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use base; use builder::Builder; use common::{CodegenCx, IntPredicate}; use type_of::LayoutLlvmExt; @@ -24,7 +23,7 @@ use mir::constant::const_alloc_to_llvm; use interfaces::*; use super::{FunctionCx, LocalRef}; -use super::operand::{OperandRef, OperandValue}; +use super::operand::OperandValue; #[derive(Copy, Clone, Debug)] pub struct PlaceRef<'tcx, V> { @@ -108,74 +107,18 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> { - debug!("PlaceRef::load: {:?}", self); - - assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); - - if self.layout.is_zst() { - return OperandRef::new_zst(bx.cx(), self.layout); - } - - let scalar_load_metadata = |load, scalar: &layout::Scalar| { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) 
=> { - let range = scalar.valid_range_exclusive(bx.cx()); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } - }; - - let val = if let Some(llextra) = self.llextra { - OperandValue::Ref(self.llval, Some(llextra), self.align) - } else if self.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = bx.load(self.llval, self.align); - if let layout::Abi::Scalar(ref scalar) = self.layout.abi { - scalar_load_metadata(load, scalar); - } - load - }); - OperandValue::Immediate(base::to_immediate(bx, llval, self.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { - let load = |i, scalar: &layout::Scalar| { - let llptr = bx.struct_gep(self.llval, i as u64); - let load = bx.load(llptr, self.align); - scalar_load_metadata(load, scalar); - if scalar.is_bool() { - bx.trunc(load, bx.cx().type_i1()) - } else { - load - } - }; - OperandValue::Pair(load(0, a), load(1, b)) - } else { - OperandValue::Ref(self.llval, None, self.align) - }; - - OperandRef { val, layout: self.layout } - } +} +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. 
- pub fn project_field( - self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + pub fn project_field>( + self, bx: &Bx, ix: usize - ) -> PlaceRef<'tcx, &'ll Value> { + ) -> PlaceRef<'tcx, ::Value> + where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let cx = bx.cx(); let field = self.layout.field(cx, ix); let offset = self.layout.fields.offset(ix); @@ -194,7 +137,7 @@ impl PlaceRef<'tcx, &'ll Value> { }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))), + llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(&field))), llextra: if cx.type_has_metadata(field.ty) { self.llextra } else { @@ -267,7 +210,7 @@ impl PlaceRef<'tcx, &'ll Value> { let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = field.llvm_type(cx); + let ll_fty = cx.backend_type(&field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { @@ -277,6 +220,9 @@ impl PlaceRef<'tcx, &'ll Value> { align: effective_field_align, } } +} + +impl PlaceRef<'tcx, &'ll Value> { /// Obtain the actual discriminant of a value. pub fn codegen_get_discr( @@ -300,7 +246,7 @@ impl PlaceRef<'tcx, &'ll Value> { } let discr = self.project_field(bx, 0); - let lldiscr = discr.load(bx).immediate(); + let lldiscr = bx.load_ref(&discr).immediate(); match self.layout.variants { layout::Variants::Single { .. } => bug!(), layout::Variants::Tagged { ref tag, .. } => { @@ -451,7 +397,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return place; } LocalRef::UnsizedPlace(place) => { - return place.load(bx).deref(&cx); + return bx.load_ref(&place).deref(&cx); } LocalRef::Operand(..) 
=> { bug!("using operand local {:?} as place", place); diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 5215bec4cc5f2..4be9beaa3d3f5 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -47,6 +47,8 @@ impl fmt::Debug for Type { } } +impl<'ll> CodegenObject for &'ll Type {} + impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_void(&self) -> &'ll Type { @@ -405,6 +407,12 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { ty.llvm_type(&self) } + fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { + ty.immediate_llvm_type(&self) + } + fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool { + ty.is_llvm_immediate() + } fn scalar_pair_element_backend_type<'a>( &self, ty: &TyLayout<'tcx>, diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index 4bf5b09baa629..5ad0fecc326ba 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -12,6 +12,7 @@ pub use llvm::Value; use llvm; +use interfaces::CodegenObject; use std::fmt; use std::hash::{Hash, Hasher}; @@ -21,6 +22,8 @@ impl PartialEq for Value { } } +impl<'ll> CodegenObject for &'ll Value {} + impl Eq for Value {} impl Hash for Value { From c71c6ccffb6d1fb602941e2ccf85194a9b9a09fb Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 20 Sep 2018 15:47:22 +0200 Subject: [PATCH 47/76] Generalized mir::codegen_mir (and all subsequent functions) --- src/librustc_codegen_llvm/abi.rs | 52 +- src/librustc_codegen_llvm/asm.rs | 84 +-- src/librustc_codegen_llvm/attributes.rs | 2 +- src/librustc_codegen_llvm/base.rs | 22 +- src/librustc_codegen_llvm/builder.rs | 22 +- src/librustc_codegen_llvm/callee.rs | 22 +- src/librustc_codegen_llvm/common.rs | 163 +++-- src/librustc_codegen_llvm/consts.rs | 19 +- src/librustc_codegen_llvm/context.rs | 184 ++--- 
.../debuginfo/create_scope_map.rs | 10 +- src/librustc_codegen_llvm/debuginfo/gdb.rs | 5 +- .../debuginfo/metadata.rs | 62 ++ src/librustc_codegen_llvm/debuginfo/mod.rs | 641 ++++++++++-------- .../debuginfo/source_loc.rs | 4 +- .../debuginfo/type_names.rs | 1 + src/librustc_codegen_llvm/debuginfo/utils.rs | 1 + src/librustc_codegen_llvm/declare.rs | 255 +++---- src/librustc_codegen_llvm/interfaces/abi.rs | 32 + src/librustc_codegen_llvm/interfaces/asm.rs | 27 + .../interfaces/backend.rs | 2 +- .../interfaces/builder.rs | 16 +- .../interfaces/consts.rs | 19 +- .../interfaces/debuginfo.rs | 51 +- .../interfaces/declare.rs | 47 ++ src/librustc_codegen_llvm/interfaces/misc.rs | 8 + src/librustc_codegen_llvm/interfaces/mod.rs | 16 +- src/librustc_codegen_llvm/interfaces/type_.rs | 22 + src/librustc_codegen_llvm/intrinsic.rs | 22 +- src/librustc_codegen_llvm/meth.rs | 15 +- src/librustc_codegen_llvm/mir/analyze.rs | 38 +- src/librustc_codegen_llvm/mir/block.rs | 263 +++---- src/librustc_codegen_llvm/mir/constant.rs | 98 +-- src/librustc_codegen_llvm/mir/mod.rs | 160 ++--- src/librustc_codegen_llvm/mir/operand.rs | 129 ++-- src/librustc_codegen_llvm/mir/place.rs | 155 +++-- src/librustc_codegen_llvm/mir/rvalue.rs | 171 ++--- src/librustc_codegen_llvm/mir/statement.rs | 23 +- src/librustc_codegen_llvm/mono_item.rs | 10 +- src/librustc_codegen_llvm/type_.rs | 11 + src/librustc_codegen_llvm/type_of.rs | 2 +- 40 files changed, 1704 insertions(+), 1182 deletions(-) create mode 100644 src/librustc_codegen_llvm/interfaces/abi.rs create mode 100644 src/librustc_codegen_llvm/interfaces/asm.rs create mode 100644 src/librustc_codegen_llvm/interfaces/declare.rs diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 0d87cbacebde1..34c008e126957 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -17,11 +17,12 @@ use mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use 
value::Value; +use rustc_target::abi::call::ArgType; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods}; +use interfaces::*; use rustc_target::abi::{LayoutOf, Size, TyLayout}; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, Instance}; use rustc::ty::layout; use libc::c_uint; @@ -279,6 +280,27 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } +impl<'a, 'll: 'a, 'tcx: 'll> ArgTypeMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn store_fn_arg( + &self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, ::Value> + ) { + ty.store_fn_arg(&self, idx, dst) + } + fn store_arg_ty( + &self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { + ty.store(&self, val, dst) + } + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.memory_ty(self.cx()) + } +} + pub trait FnTypeExt<'tcx> { fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>) -> Self; fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, @@ -754,3 +776,29 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } } + +impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> { + FnType::new(&self, sig, extra_args) + } + fn new_vtable( + &self, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>> { + FnType::new_vtable(&self, sig, extra_args) + } + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> { + FnType::of_instance(&self, instance) + } +} + +impl AbiBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn apply_attrs_callsite( + &self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: ::Value + ) { + ty.apply_attrs_callsite(&self, callsite) + } +} diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 0092720fa0184..032b7d2b7d6df 100644 
--- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -15,7 +15,7 @@ use builder::Builder; use value::Value; use rustc::hir; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods}; +use interfaces::*; use mir::place::PlaceRef; use mir::operand::OperandValue; @@ -23,33 +23,35 @@ use mir::operand::OperandValue; use std::ffi::CString; use libc::{c_uint, c_char}; -// Take an inline assembly expression and splat it out via LLVM -pub fn codegen_inline_asm( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - ia: &hir::InlineAsm, - outputs: Vec>, - mut inputs: Vec<&'ll Value> -) -> bool { - let mut ext_constraints = vec![]; - let mut output_types = vec![]; - - // Prepare the output operands - let mut indirect_outputs = vec![]; - for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { - if out.is_rw { - inputs.push(bx.load_ref(place).immediate()); - ext_constraints.push(i.to_string()); + +impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + // Take an inline assembly expression and splat it out via LLVM + fn codegen_inline_asm( + &self, + ia: &hir::InlineAsm, + outputs: Vec>, + mut inputs: Vec<&'ll Value> + ) -> bool { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; + + // Prepare the output operands + let mut indirect_outputs = vec![]; + for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { + if out.is_rw { + inputs.push(self.load_ref(place).immediate()); + ext_constraints.push(i.to_string()); + } + if out.is_indirect { + indirect_outputs.push(self.load_ref(place).immediate()); + } else { + output_types.push(place.layout.llvm_type(self.cx())); + } } - if out.is_indirect { - indirect_outputs.push(bx.load_ref(place).immediate()); - } else { - output_types.push(place.layout.llvm_type(bx.cx())); + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; } - } - if !indirect_outputs.is_empty() { - 
indirect_outputs.extend_from_slice(&inputs); - inputs = indirect_outputs; - } let clobbers = ia.clobbers.iter() .map(|s| format!("~{{{}}}", &s)); @@ -103,26 +105,28 @@ pub fn codegen_inline_asm( OperandValue::Immediate(v).store(bx, place); } - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. - unsafe { - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx, - key.as_ptr() as *const c_char, key.len() as c_uint); + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. + unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx, + key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32); - llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1)); + llvm::LLVMSetMetadata(r, kind, + llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1)); + } } return true; } -pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, - ga: &hir::GlobalAsm) { - let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); - unsafe { - llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr()); +impl AsmMethods for CodegenCx<'ll, 'tcx, &'ll Value> { + fn codegen_global_asm(&self, ga: &hir::GlobalAsm) { + let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr()); + } } } diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index c85cc8b0c8f7f..c8d789e821ad1 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -16,11 +16,11 @@ use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::session::Session; use rustc::session::config::Sanitizer; use rustc::ty::TyCtxt; -use 
rustc::ty::layout::HasTyCtxt; use rustc::ty::query::Providers; use rustc_data_structures::sync::Lrc; use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; +use interfaces::*; use attributes; use llvm::{self, Attribute}; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 88367cf44e635..be9350503e21f 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -58,7 +58,6 @@ use rustc_mir::monomorphize::item::DefPathBasedNames; use common::{self, IntPredicate, RealPredicate, TypeKind}; use context::CodegenCx; use debuginfo; -use declare; use meth; use mir; use monomorphize::Instance; @@ -395,15 +394,18 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn call_assume(bx: &Builder<'_, 'll, '_, &'ll Value>, val: &'ll Value) { +pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &Bx, + val: ::Value +) { let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); bx.call(assume_intrinsic, &[val], None); } -pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll ,'tcx>>( - bx: &Builder, - val: ::Value -) -> ::Value { +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &Bx, + val: ::Value +) -> ::Value { if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { @@ -449,7 +451,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx> bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); } -pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>) { +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>) { let _s = if cx.sess().codegen_stats() { let mut instance_name = String::new(); DefPathBasedNames::new(cx.tcx, true, true) @@ -474,7 +476,7 @@ pub fn codegen_instance<'a, 
'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: cx.stats.borrow_mut().n_closures += 1; let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir(cx, lldecl, &mir, instance, sig); + mir::codegen_mir::<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>>(cx, lldecl, &mir, instance, sig); } pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { @@ -535,7 +537,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { &main_ret_ty.no_late_bound_regions().unwrap(), ); - if declare::get_defined_value(cx, "main").is_some() { + if cx.get_defined_value("main").is_some() { // FIXME: We should be smart and show a better diagnostic here. cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") @@ -543,7 +545,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { cx.sess().abort_if_errors(); bug!(); } - let llfn = declare::declare_cfn(cx, "main", llfty); + let llfn = cx.declare_cfn("main", llfty); // `main` should respect same config for frame pointer elimination as rest of code attributes::set_frame_pointer_elimination(cx, llfn); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 46f7acbbcf836..6ec0e38c1caad 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -18,7 +18,7 @@ use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; use rustc::ty::layout::{self, Align, Size}; -use rustc::session::{config, Session}; +use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; use interfaces::*; use syntax; @@ -99,10 +99,6 @@ impl BuilderMethods<'a, 'll, 'tcx> Builder::new_block(self.cx, self.llfn(), name) } - fn sess(&self) -> &Session { - self.cx.sess() - } - fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.cx.tcx } @@ -605,7 +601,7 @@ impl BuilderMethods<'a, 'll, 'tcx> fn range_metadata(&self, load: &'ll Value, range: Range) 
{ - if self.sess().target.target.arch == "amdgpu" { + if self.cx().sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, // tripping an assertion. So, for now, just disable this @@ -1381,7 +1377,7 @@ impl BuilderMethods<'a, 'll, 'tcx> return; } let cx = &self.cx(); - let ptr_width = &self.sess().target.target.target_pointer_width; + let ptr_width = &self.cx().sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = cx.get_intrinsic(&key); let src_ptr = &self.pointercast(src, cx.type_i8p()); @@ -1400,7 +1396,7 @@ impl BuilderMethods<'a, 'll, 'tcx> align: &'ll Value, volatile: bool, ) -> &'ll Value { - let ptr_width = &self.sess().target.target.target_pointer_width; + let ptr_width = &self.cx().sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = &self.cx().get_intrinsic(&intrinsic_key); let volatile = &self.cx().const_bool(volatile); @@ -1425,4 +1421,14 @@ impl BuilderMethods<'a, 'll, 'tcx> fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> { &self.cx } + + fn delete_basic_block(&self, bb: &'ll BasicBlock) { + unsafe { + llvm::LLVMDeleteBasicBlock(bb); + } + } + + fn do_not_inline(&self, llret: &'ll Value) { + llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + } } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index beb0e01936998..d0d0727a85959 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -15,12 +15,10 @@ //! closure. 
use attributes; -use common::{self, CodegenCx}; -use consts; -use declare; +use common; use llvm; use monomorphize::Instance; -use type_of::LayoutLlvmExt; +use context::CodegenCx; use value::Value; use interfaces::*; @@ -40,7 +38,7 @@ pub fn get_fn( cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>, ) -> &'ll Value { - let tcx = cx.tcx; + let tcx = cx.tcx(); debug!("get_fn(instance={:?})", instance); @@ -48,8 +46,8 @@ pub fn get_fn( assert!(!instance.substs.has_escaping_regions()); assert!(!instance.substs.has_param_types()); - let fn_ty = instance.ty(cx.tcx); - if let Some(&llfn) = cx.instances.borrow().get(&instance) { + let fn_ty = instance.ty(*cx.tcx()); + if let Some(&llfn) = cx.instances().borrow().get(&instance) { return llfn; } @@ -58,9 +56,9 @@ pub fn get_fn( // Create a fn pointer with the substituted signature. let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(cx, fn_ty)); - let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx); + let llptrty = cx.backend_type(&cx.layout_of(fn_ptr_ty)); - let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) { + let llfn = if let Some(llfn) = cx.get_declared_value(&sym) { // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external // functions. If you have two crates that both bind the same C @@ -86,17 +84,17 @@ pub fn get_fn( // other weird situations. Annoying. 
if cx.val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - consts::ptrcast(llfn, llptrty) + cx.static_ptrcast(llfn, llptrty) } else { debug!("get_fn: not casting pointer!"); llfn } } else { - let llfn = declare::declare_fn(cx, &sym, fn_ty); + let llfn = cx.declare_fn(&sym, fn_ty); assert_eq!(cx.val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); - if instance.def.is_inline(tcx) { + if instance.def.is_inline(*tcx) { attributes::inline(cx, llfn, attributes::InlineAttr::Hint); } attributes::from_fn_attrs(cx, llfn, Some(instance.def.def_id())); diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 00e08ec58cd3c..7cbfa1efd1701 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -17,24 +17,26 @@ use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; use abi; use base; -use builder::Builder; use consts; -use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::{Backend, ConstMethods, BaseTypeMethods}; +use interfaces::*; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; +use rustc::mir::interpret::{Scalar, AllocType, Allocation}; use rustc::hir; use interfaces::BuilderMethods; +use mir::constant::const_alloc_to_llvm; +use mir::place::PlaceRef; use libc::{c_uint, c_char}; use std::iter; use rustc_target::spec::abi::Abi; use syntax::symbol::LocalInternedString; +use syntax::ast::Mutability; use syntax_pos::{Span, DUMMY_SP}; pub use context::CodegenCx; @@ -51,13 +53,13 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) } -pub struct OperandBundleDef<'a, Value> { +pub struct OperandBundleDef<'a, V> { pub name: &'a str, - pub val: Value + pub val: V } -impl OperandBundleDef<'ll, &'ll Value> { - pub fn 
new(name: &'ll str, val: &'ll Value) -> Self { +impl OperandBundleDef<'ll, V> { + pub fn new(name: &'ll str, val: V) -> Self { OperandBundleDef { name, val @@ -193,24 +195,24 @@ pub enum TypeKind { /// When inside of a landing pad, each function call in LLVM IR needs to be /// annotated with which landing pad it's a part of. This is accomplished via /// the `OperandBundleDef` value created for MSVC landing pads. -pub struct Funclet<'ll> { - cleanuppad: &'ll Value, - operand: OperandBundleDef<'ll, &'ll Value>, +pub struct Funclet<'ll, V> { + cleanuppad: V, + operand: OperandBundleDef<'ll, V>, } -impl Funclet<'ll> { - pub fn new(cleanuppad: &'ll Value) -> Self { +impl<'ll, V : CodegenObject> Funclet<'ll, V> { + pub fn new(cleanuppad: V) -> Self { Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", cleanuppad), } } - pub fn cleanuppad(&self) -> &'ll Value { + pub fn cleanuppad(&self) -> V { self.cleanuppad } - pub fn bundle(&self) -> &OperandBundleDef<'ll, &'ll Value> { + pub fn bundle(&self) -> &OperandBundleDef<'ll, V> { &self.operand } } @@ -222,7 +224,7 @@ impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { type Context = &'ll llvm::Context; } -impl<'ll, 'tcx : 'll> ConstMethods for CodegenCx<'ll, 'tcx, &'ll Value> { +impl<'ll, 'tcx : 'll> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { // LLVM constant constructors. 
fn const_null(&self, t: &'ll Type) -> &'ll Value { @@ -304,7 +306,7 @@ impl<'ll, 'tcx : 'll> ConstMethods for CodegenCx<'ll, 'tcx, &'ll Value> { s.len() as c_uint, !null_terminated as Bool); let sym = &self.generate_local_symbol_name("str"); - let g = declare::define_global(&self, &sym[..], &self.val_ty(sc)).unwrap_or_else(||{ + let g = &self.define_global(&sym[..], &self.val_ty(sc)).unwrap_or_else(||{ bug!("symbol `{}` is already defined", sym); }); llvm::LLVMSetInitializer(g, sc); @@ -419,6 +421,79 @@ impl<'ll, 'tcx : 'll> ConstMethods for CodegenCx<'ll, 'tcx, &'ll Value> { } } } + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: &'ll Type, + ) -> &'ll Value { + let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; + match cv { + Scalar::Bits { size: 0, .. } => { + assert_eq!(0, layout.value.size(self).bytes()); + self.const_undef(self.type_ix(0)) + }, + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(self).bytes()); + let llval = self.const_uint_big(self.type_ix(bitsize), bits); + if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + }, + Scalar::Ptr(ptr) => { + let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id); + let base_addr = match alloc_type { + Some(AllocType::Memory(alloc)) => { + let init = const_alloc_to_llvm(self, alloc); + if alloc.mutability == Mutability::Mutable { + self.static_addr_of_mut(init, alloc.align, None) + } else { + self.static_addr_of(init, alloc.align, None) + } + } + Some(AllocType::Function(fn_instance)) => { + self.get_fn(fn_instance) + } + Some(AllocType::Static(def_id)) => { + assert!(self.tcx.is_static(def_id).is_some()); + self.get_static(def_id) + } + None => bug!("missing allocation {:?}", ptr.alloc_id), + }; + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + 
&self.const_usize(ptr.offset.bytes()), + 1, + ) }; + if layout.value != layout::Pointer { + unsafe { llvm::LLVMConstPtrToInt(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + } + } + } + + fn from_const_alloc( + &self, + layout: TyLayout<'tcx>, + alloc: &Allocation, + offset: Size, + ) -> PlaceRef<'tcx, &'ll Value> { + let init = const_alloc_to_llvm(self, alloc); + let base_addr = self.static_addr_of(init, layout.align, None); + + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + &self.const_usize(offset.bytes()), + 1, + )}; + let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self))); + PlaceRef::new_sized(llval, layout, alloc.align) + } } pub fn val_ty(v: &'ll Value) -> &'ll Type { @@ -470,20 +545,23 @@ pub fn langcall(tcx: TyCtxt, // all shifts). For 32- and 64-bit types, this matches the semantics // of Java. (See related discussion on #1877 and #10183.) -pub fn build_unchecked_lshift( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - lhs: &'ll Value, - rhs: &'ll Value -) -> &'ll Value { +pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs: ::Value, + rhs: ::Value +) -> ::Value { let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); bx.shl(lhs, rhs) } -pub fn build_unchecked_rshift( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { +pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs_t: Ty<'tcx>, + lhs: ::Value, + rhs: ::Value +) -> ::Value { let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); @@ -495,26 +573,29 @@ pub fn build_unchecked_rshift( } } -fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx, &'ll 
Value>, rhs: &'ll Value) -> &'ll Value { +fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + rhs: ::Value +) -> ::Value { let rhs_llty = bx.cx().val_ty(rhs); bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) } -pub fn shift_mask_val( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - llty: &'ll Type, - mask_llty: &'ll Type, +pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + llty: ::Type, + mask_llty: ::Type, invert: bool -) -> &'ll Value { +) -> ::Value { let kind = bx.cx().type_kind(llty); match kind { TypeKind::Integer => { // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. let val = bx.cx().int_width(llty) - 1; if invert { - bx.cx.const_int(mask_llty, !val as i64) + bx.cx().const_int(mask_llty, !val as i64) } else { - bx.cx.const_uint(mask_llty, val) + bx.cx().const_uint(mask_llty, val) } }, TypeKind::Vector => { @@ -530,16 +611,16 @@ pub fn shift_mask_val( } } -pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, - ty: Ty<'tcx>) - -> ty::PolyFnSig<'tcx> -{ +pub fn ty_fn_sig<'ll, 'tcx:'ll, Cx: CodegenMethods<'ll, 'tcx>>( + cx: &Cx, + ty: Ty<'tcx> +) -> ty::PolyFnSig<'tcx> { match ty.sty { ty::FnDef(..) | // Shims currently have type FnPtr. Not sure this should remain. 
- ty::FnPtr(_) => ty.fn_sig(cx.tcx), + ty::FnPtr(_) => ty.fn_sig(*cx.tcx()), ty::Closure(def_id, substs) => { - let tcx = cx.tcx; + let tcx = *cx.tcx(); let sig = substs.closure_sig(def_id, tcx); let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); @@ -552,8 +633,8 @@ pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, )) } ty::Generator(def_id, substs, _) => { - let tcx = cx.tcx; - let sig = substs.poly_sig(def_id, cx.tcx); + let tcx = *cx.tcx(); + let sig = substs.poly_sig(def_id, tcx); let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 4c7632365ccbf..26d1b86b03c5e 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -16,7 +16,6 @@ use debuginfo; use base; use monomorphize::MonoItem; use common::CodegenCx; -use declare; use monomorphize::Instance; use syntax_pos::Span; use syntax_pos::symbol::LocalInternedString; @@ -24,7 +23,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::{BaseTypeMethods, DerivedTypeMethods, StaticMethods}; +use interfaces::*; use rustc::ty::layout::{Align, LayoutOf}; @@ -79,7 +78,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(cx, &sym, llty2); + let g1 = cx.declare_global(&sym, llty2); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -90,7 +89,7 @@ fn check_and_apply_linkage( // zero. 
let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{ + let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{ if let Some(span) = span { cx.sess().span_fatal( span, @@ -107,7 +106,7 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. // FIXME(nagisa): investigate whether it can be changed into define_global - declare::declare_global(cx, &sym, llty) + cx.declare_global(&sym, llty) } } @@ -138,15 +137,15 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { unsafe { let gv = match kind { Some(kind) if !&self.tcx.sess.fewer_names() => { - let name = &self.generate_local_symbol_name(kind); - let gv = declare::define_global(&self, &name[..], + let name = self.generate_local_symbol_name(kind); + let gv = self.define_global(&name[..], &self.val_ty(cv)).unwrap_or_else(||{ bug!("symbol `{}` is already defined", name); }); llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); gv }, - _ => declare::define_private_global(&self, &self.val_ty(cv)), + _ => self.define_private_global(&self.val_ty(cv)), }; llvm::LLVMSetInitializer(gv, cv); set_global_alignment(&self, gv, align); @@ -206,11 +205,11 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { Node::Item(&hir::Item { ref attrs, span, node: hir::ItemKind::Static(..), .. 
}) => { - if declare::get_declared_value(&self, &sym[..]).is_some() { + if self.get_declared_value(&sym[..]).is_some() { span_bug!(span, "Conflicting symbol names for static?"); } - let g = declare::define_global(&self, &sym[..], llty).unwrap(); + let g = self.define_global(&sym[..], llty).unwrap(); if !&self.tcx.is_reachable_non_generic(def_id) { unsafe { diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 687b6d83c57ea..3a1ffe03616d5 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -15,7 +15,6 @@ use rustc::hir; use debuginfo; use callee; use base; -use declare; use monomorphize::Instance; use value::Value; @@ -23,6 +22,7 @@ use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; use interfaces::*; +use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -316,21 +316,108 @@ impl<'a, 'tcx, Value : Eq+Hash> CodegenCx<'a, 'tcx, Value> { } } -impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { - pub fn sess<'a>(&'a self) -> &'a Session { - &self.tcx.sess - } -} - impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn vtables(&self) -> &RefCell, Option>), &'ll Value>> { &self.vtables } + + fn instances(&self) -> &RefCell, &'ll Value>> { + &self.instances + } + fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value { callee::get_fn(&&self,instance) } + + fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { + llvm::get_param(llfn, index) + } + + fn eh_personality(&self) -> &'ll Value { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to codegen that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. 
We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". + if let Some(llpersonality) = self.eh_personality.get() { + return llpersonality + } + let tcx = self.tcx; + let llfn = match tcx.lang_items().eh_personality() { + Some(def_id) if !base::wants_msvc_seh(self.sess()) => { + callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) + } + _ => { + let name = if base::wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } else { + "rust_eh_personality" + }; + let fty = &self.type_variadic_func(&[], &self.type_i32()); + self.declare_cfn(name, fty) + } + }; + attributes::apply_target_cpu_attr(self, llfn); + self.eh_personality.set(Some(llfn)); + llfn + } + + // Returns a Value of the "eh_unwind_resume" lang item if one is defined, + // otherwise declares it as an external function. 
+ fn eh_unwind_resume(&self) -> &'ll Value { + use attributes; + let unwresume = &self.eh_unwind_resume; + if let Some(llfn) = unwresume.get() { + return llfn; + } + + let tcx = self.tcx; + assert!(self.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { + let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); + unwresume.set(Some(llfn)); + return llfn; + } + + let ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig( + iter::once(tcx.mk_mut_ptr(tcx.types.u8)), + tcx.types.never, + false, + hir::Unsafety::Unsafe, + Abi::C + ))); + + let llfn = self.declare_fn("rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + attributes::apply_target_cpu_attr(self, llfn); + unwresume.set(Some(llfn)); + llfn + } + + fn sess(&self) -> &Session { + &self.tcx.sess + } + + fn check_overflow(&self) -> bool { + self.check_overflow + } } impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} @@ -352,7 +439,7 @@ impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(&self, $name, &self.type_func(&[], $ret)); + let f = &self.declare_cfn($name, &self.type_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); &self.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -360,7 +447,7 @@ impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { ); ($name:expr, fn(...) 
-> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(&self, $name, &self.type_variadic_func(&[], $ret)); + let f = &self.declare_cfn($name, &self.type_variadic_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); &self.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -368,7 +455,7 @@ impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(&self, $name, &self.type_func(&[$($arg),*], $ret)); + let f = &self.declare_cfn($name, &self.type_func(&[$($arg),*], $ret)); llvm::SetUnnamedAddr(f, false); &self.intrinsics.borrow_mut().insert($name, f.clone()); return Some(f); @@ -665,83 +752,6 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); name } - - pub fn eh_personality(&self) -> &'b Value { - // The exception handling personality function. - // - // If our compilation unit has the `eh_personality` lang item somewhere - // within it, then we just need to codegen that. Otherwise, we're - // building an rlib which will depend on some upstream implementation of - // this function, so we just codegen a generic reference to it. We don't - // specify any of the types for the function, we just make it a symbol - // that LLVM can later use. - // - // Note that MSVC is a little special here in that we don't use the - // `eh_personality` lang item at all. Currently LLVM has support for - // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the - // *name of the personality function* to decide what kind of unwind side - // tables/landing pads to emit. It looks like Dwarf is used by default, - // injecting a dependency on the `_Unwind_Resume` symbol for resuming - // an "exception", but for MSVC we want to force SEH. 
This means that we - // can't actually have the personality function be our standard - // `rust_eh_personality` function, but rather we wired it up to the - // CRT's custom personality function, which forces LLVM to consider - // landing pads as "landing pads for SEH". - if let Some(llpersonality) = self.eh_personality.get() { - return llpersonality - } - let tcx = self.tcx; - let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.sess()) => { - callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) - } - _ => { - let name = if base::wants_msvc_seh(self.sess()) { - "__CxxFrameHandler3" - } else { - "rust_eh_personality" - }; - let fty = &self.type_variadic_func(&[], &self.type_i32()); - declare::declare_cfn(self, name, fty) - } - }; - attributes::apply_target_cpu_attr(self, llfn); - self.eh_personality.set(Some(llfn)); - llfn - } - - // Returns a Value of the "eh_unwind_resume" lang item if one is defined, - // otherwise declares it as an external function. 
- pub fn eh_unwind_resume(&self) -> &'b Value { - use attributes; - let unwresume = &self.eh_unwind_resume; - if let Some(llfn) = unwresume.get() { - return llfn; - } - - let tcx = self.tcx; - assert!(self.sess().target.target.options.custom_unwind_resume); - if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { - let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); - unwresume.set(Some(llfn)); - return llfn; - } - - let ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig( - iter::once(tcx.mk_mut_ptr(tcx.types.u8)), - tcx.types.never, - false, - hir::Unsafety::Unsafe, - Abi::C - ))); - - let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty); - attributes::unwind(llfn, true); - attributes::apply_target_cpu_attr(self, llfn); - unwresume.set(Some(llfn)); - llfn - } - } impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 9582f175ceecd..8f70997eb2500 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -28,15 +28,15 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use syntax_pos::BytePos; #[derive(Clone, Copy, Debug)] -pub struct MirDebugScope<'ll> { - pub scope_metadata: Option<&'ll DIScope>, +pub struct MirDebugScope { + pub scope_metadata: Option, // Start and end offsets of the file to which this DIScope belongs. // These are used to quickly determine whether some span refers to the same file. 
pub file_start_pos: BytePos, pub file_end_pos: BytePos, } -impl MirDebugScope<'ll> { +impl MirDebugScope { pub fn is_valid(&self) -> bool { self.scope_metadata.is_some() } @@ -48,7 +48,7 @@ pub fn create_mir_scopes( cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, debug_context: &FunctionDebugContext<'ll>, -) -> IndexVec> { +) -> IndexVec> { let null_scope = MirDebugScope { scope_metadata: None, file_start_pos: BytePos(0), @@ -85,7 +85,7 @@ fn make_mir_scope(cx: &CodegenCx<'ll, '_, &'ll Value>, has_variables: &BitSet, debug_context: &FunctionDebugContextData<'ll>, scope: SourceScope, - scopes: &mut IndexVec>) { + scopes: &mut IndexVec>) { if scopes[scope].is_valid() { return; } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 9794fabbdc054..a9ec5e571702a 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -14,10 +14,9 @@ use llvm; use common::CodegenCx; use builder::Builder; -use declare; use rustc::session::config::DebugInfo; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods}; +use interfaces::*; use syntax::attr; @@ -58,7 +57,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &' let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); - let section_var = declare::define_global(cx, section_var_name, + let section_var = cx.define_global(section_var_name, llvm_type).unwrap_or_else(||{ bug!("symbol `{}` is already defined", section_var_name) }); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 89e622efecc52..2efc706f27467 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -1729,6 +1729,68 @@ pub fn create_global_var_metadata( } } +/// Creates debug information for the given vtable, which is for the +/// given type. 
+/// +/// Adds the created metadata nodes directly to the crate's IR. +pub fn create_vtable_metadata( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + ty: ty::Ty<'tcx>, + vtable: &'ll Value, +) { + if cx.dbg_cx.is_none() { + return; + } + + let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); + + unsafe { + // LLVMRustDIBuilderCreateStructType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(cx), &[]); + + let name = const_cstr!("vtable"); + + // Create a new one each time. We don't want metadata caching + // here, because each vtable will refer to a unique containing + // type. + let vtable_type = llvm::LLVMRustDIBuilderCreateStructType( + DIB(cx), + NO_SCOPE_METADATA, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + Size::ZERO.bits(), + cx.tcx.data_layout.pointer_align.abi_bits() as u32, + DIFlags::FlagArtificial, + None, + empty_array, + 0, + Some(type_metadata), + name.as_ptr() + ); + + llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), + NO_SCOPE_METADATA, + name.as_ptr(), + // LLVM 3.9 + // doesn't accept + // null here, so + // pass the name + // as the linkage + // name. + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + vtable_type, + true, + vtable, + None, + 0); + } +} + // Creates an "extension" of an existing DIScope into another file. 
pub fn extend_scope_to_file( cx: &CodegenCx<'ll, '_, &'ll Value>, diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 08212f28569b3..917884f7a1022 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -21,7 +21,8 @@ use self::metadata::{type_metadata, file_metadata, TypeMap}; use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, + DILexicalBlock}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::{DefId, CrateNum}; use rustc::ty::subst::{Substs, UnpackedKind}; @@ -35,6 +36,7 @@ use rustc::mir; use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::indexed_vec::IndexVec; use value::Value; use libc::c_uint; @@ -45,7 +47,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; use rustc::ty::layout::{self, LayoutOf}; -use interfaces::BuilderMethods; +use interfaces::*; pub mod gdb; mod utils; @@ -144,12 +146,12 @@ pub struct FunctionDebugContextData<'ll> { pub defining_crate: CrateNum, } -pub enum VariableAccess<'a, 'll> { +pub enum VariableAccess<'a, V> { // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: &'ll Value }, + DirectVariable { alloca: V }, // The llptr given is an alloca containing the start of some pointer chain // leading to the variable's content. 
- IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] } + IndirectVariable { alloca: V, address_operations: &'a [i64] } } pub enum VariableKind { @@ -202,75 +204,158 @@ pub fn finalize(cx: &CodegenCx<'ll, '_, &'ll Value>) { }; } -/// Creates the function-specific debug context. -/// -/// Returns the FunctionDebugContext for the function which holds state needed -/// for debug info creation. The function may also return another variant of the -/// FunctionDebugContext enum which indicates why no debuginfo should be created -/// for the function. -pub fn create_function_debug_context( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - instance: Instance<'tcx>, - sig: ty::FnSig<'tcx>, - llfn: &'ll Value, - mir: &mir::Mir, -) -> FunctionDebugContext<'ll> { - if cx.sess().opts.debuginfo == DebugInfo::None { - return FunctionDebugContext::DebugInfoDisabled; +impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll Value> +{ + fn declare_local( + &self, + dbg_context: &FunctionDebugContext<'ll>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: &'ll DIScope, + variable_access: VariableAccess<'_, &'ll Value>, + variable_kind: VariableKind, + span: Span, + ) { + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + let cx = self.cx(); + + let file = span_start(cx, span).file; + let file_metadata = file_metadata(cx, + &file.name, + dbg_context.get_ref(span).defining_crate); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable => (0, DW_TAG_auto_variable) + }; + let align = cx.align_of(variable_type); + + let name = SmallCStr::new(&variable_name.as_str()); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => 
{ + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index, + align.abi() as u32, + ) + }; + source_loc::set_debug_location(self, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(self.llbuilder); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, + self.llbb()); + + llvm::LLVMSetInstDebugLocation(self.llbuilder, instr); + } + source_loc::set_debug_location(self, UnknownLocation); + } + } } - if let InstanceDef::Item(def_id) = instance.def { - if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } + fn set_source_location( + &self, + debug_context: &FunctionDebugContext<'ll>, + scope: Option<&'ll DIScope>, + span: Span, + ) { + set_source_location(debug_context, &self, scope, span) } +} - let span = mir.span; +impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - // This can be the case for functions inlined from another crate - if span.is_dummy() { - // FIXME(simulacrum): Probably can't happen; remove. - return FunctionDebugContext::FunctionWithoutDebugInfo; - } + type DIScope = &'ll DIScope; - let def_id = instance.def_id(); - let containing_scope = get_containing_scope(cx, instance); - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate); + /// Creates the function-specific debug context. + /// + /// Returns the FunctionDebugContext for the function which holds state needed + /// for debug info creation. 
The function may also return another variant of the + /// FunctionDebugContext enum which indicates why no debuginfo should be created + /// for the function. + fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: &'ll Value, + mir: &mir::Mir, + ) -> FunctionDebugContext<'ll> { + if self.sess().opts.debuginfo == DebugInfo::None { + return FunctionDebugContext::DebugInfoDisabled; + } - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(cx, sig); - llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) - }; + if let InstanceDef::Item(def_id) = instance.def { + if self.tcx().codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + } - // Find the enclosing function, in case this is a closure. - let def_key = cx.tcx.def_key(def_id); - let mut name = def_key.disambiguated_data.data.to_string(); + let span = mir.span; - let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id); + // This can be the case for functions inlined from another crate + if span.is_dummy() { + // FIXME(simulacrum): Probably can't happen; remove. + return FunctionDebugContext::FunctionWithoutDebugInfo; + } - // Get_template_parameters() will append a `<...>` clause to the function - // name if necessary. 
- let generics = cx.tcx.generics_of(enclosing_fn_def_id); - let substs = instance.substs.truncate_to(cx.tcx, generics); - let template_parameters = get_template_parameters(cx, - &generics, - substs, - file_metadata, - &mut name); + let def_id = instance.def_id(); + let containing_scope = get_containing_scope(&self, instance); + let loc = span_start(&self, span); + let file_metadata = file_metadata(&self, &loc.file.name, def_id.krate); - // Get the linkage_name, which is just the symbol name - let linkage_name = mangled_name_of_instance(cx, instance); + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(&self, sig); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(&self), file_metadata, fn_signature) + }; + + // Find the enclosing function, in case this is a closure. + let def_key = self.tcx().def_key(def_id); + let mut name = def_key.disambiguated_data.data.to_string(); + + let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id); + + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. 
+ let generics = self.tcx().generics_of(enclosing_fn_def_id); + let substs = instance.substs.truncate_to(*self.tcx(), generics); + let template_parameters = get_template_parameters(&self, + &generics, + substs, + file_metadata, + &mut name); - let scope_line = span_start(cx, span).line; - let is_local_to_unit = is_node_local_to_unit(cx, def_id); + // Get the linkage_name, which is just the symbol name + let linkage_name = mangled_name_of_instance(&self, instance); - let function_name = CString::new(name).unwrap(); - let linkage_name = SmallCStr::new(&linkage_name.as_str()); + let scope_line = span_start(&self, span).line; + let is_local_to_unit = is_node_local_to_unit(&self, def_id); - let mut flags = DIFlags::FlagPrototyped; + let function_name = CString::new(name).unwrap(); + let linkage_name = SmallCStr::new(&linkage_name.as_str()); - let local_id = cx.tcx.hir.as_local_node_id(def_id); + let mut flags = DIFlags::FlagPrototyped; + + let local_id = self.tcx().hir.as_local_node_id(def_id); if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() { if local_id == Some(id) { flags |= DIFlags::FlagMainSubprogram; @@ -281,269 +366,229 @@ pub fn create_function_debug_context( flags |= DIFlags::FlagNoReturn; } - let fn_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateFunction( - DIB(cx), - containing_scope, - function_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - loc.line as c_uint, - function_type_metadata, - is_local_to_unit, - true, - scope_line as c_uint, - flags, - cx.sess().opts.optimize != config::OptLevel::No, - llfn, - template_parameters, - None) - }; + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(&self), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + flags, + self.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + None) + }; - // Initialize fn debug 
context (including scope map and namespace map) - let fn_debug_context = FunctionDebugContextData { - fn_metadata, - source_locations_enabled: Cell::new(false), - defining_crate: def_id.krate, - }; + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = FunctionDebugContextData { + fn_metadata, + source_locations_enabled: Cell::new(false), + defining_crate: def_id.krate, + }; - return FunctionDebugContext::RegularContext(fn_debug_context); + return FunctionDebugContext::RegularContext(fn_debug_context); - fn get_function_signature( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - sig: ty::FnSig<'tcx>, - ) -> &'ll DIArray { - if cx.sess().opts.debuginfo == DebugInfo::Limited { - return create_DIArray(DIB(cx), &[]); - } + fn get_function_signature<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + sig: ty::FnSig<'tcx>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } - let mut signature = Vec::with_capacity(sig.inputs().len() + 1); + let mut signature = Vec::with_capacity(sig.inputs().len() + 1); - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(match sig.output().sty { - ty::Tuple(ref tys) if tys.is_empty() => None, - _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) - }); + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output().sty { + ty::Tuple(ref tys) if tys.is_empty() => None, + _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) + }); - let inputs = if sig.abi == Abi::RustCall { - &sig.inputs()[..sig.inputs().len() - 1] - } else { - sig.inputs() - }; + let inputs = if sig.abi == Abi::RustCall { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + sig.inputs() + }; - // Arguments types - if cx.sess().target.target.options.is_like_msvc { - // FIXME(#42800): - // There is a bug in MSDIA that leads to a crash when it encounters - // a fixed-size array of `u8` or 
something zero-sized in a - // function-type (see #40477). - // As a workaround, we replace those fixed-size arrays with a - // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would - // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, - // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. - // This transformed type is wrong, but these function types are - // already inaccurate due to ABI adjustments (see #42800). - signature.extend(inputs.iter().map(|&t| { - let t = match t.sty { - ty::Array(ct, _) - if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { - cx.tcx.mk_imm_ptr(ct) - } - _ => t - }; - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } else { - signature.extend(inputs.iter().map(|t| { - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } + // Arguments types + if cx.sess().target.target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). 
+ signature.extend(inputs.iter().map(|&t| { + let t = match t.sty { + ty::Array(ct, _) + if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { + cx.tcx.mk_imm_ptr(ct) + } + _ => t + }; + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } else { + signature.extend(inputs.iter().map(|t| { + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } - if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { - if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { - signature.extend( - args.iter().map(|argument_type| { - Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) - }) - ); + if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { + if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { + signature.extend( + args.iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) + }) + ); + } } + + create_DIArray(DIB(cx), &signature[..]); } - create_DIArray(DIB(cx), &signature[..]) - } + fn get_template_parameters<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + generics: &ty::Generics, + substs: &Substs<'tcx>, + file_metadata: &'ll DIFile, + name_to_append_suffix_to: &mut String, + ) -> &'ll DIArray { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } - fn get_template_parameters( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - generics: &ty::Generics, - substs: &Substs<'tcx>, - file_metadata: &'ll DIFile, - name_to_append_suffix_to: &mut String, - ) -> &'ll DIArray { - if substs.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } - name_to_append_suffix_to.push('<'); - for (i, actual_type) in substs.types().enumerate() { - if i != 0 { - name_to_append_suffix_to.push_str(","); + let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); + // Add actual 
type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { + let names = get_parameter_names(cx, generics); + substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0, + )) + }) + } else { + None + } + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); + } - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx, - actual_type, - true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); + fn get_parameter_names<'ll>(cx: &CodegenCx<'ll, '_, &'ll Value>, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + names.extend(generics.params.iter().map(|param| param.name)); + names } - name_to_append_suffix_to.push('>'); - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - substs.iter().zip(names).filter_map(|(kind, name)| { - if let UnpackedKind::Type(ty) = kind.unpack() { - let actual_type = 
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); - let actual_type_metadata = - type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); - let name = SmallCStr::new(&name.as_str()); - Some(unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_ptr(), - actual_type_metadata, - file_metadata, - 0, - 0, - )) - }) + + fn get_containing_scope<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + instance: Instance<'tcx>, + ) -> &'ll DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. + let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + &cx.tcx.type_of(impl_def_id), + ); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::Adt(def, ..) 
if !def.is_box() => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } } else { + // For trait method impls we still use the "parallel namespace" + // strategy None } - }).collect() - } else { - vec![] - }; - - create_DIArray(DIB(cx), &template_params[..]) + }); + + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def_id().krate, + index: cx.tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?") + }) + }) + } } - fn get_parameter_names(cx: &CodegenCx<'ll, '_, &'ll Value>, - generics: &ty::Generics) - -> Vec { - let mut names = generics.parent.map_or(vec![], |def_id| { - get_parameter_names(cx, cx.tcx.generics_of(def_id)) - }); - names.extend(generics.params.iter().map(|param| param.name)); - names + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ) { + metadata::create_vtable_metadata(&self, ty, vtable) } - fn get_containing_scope( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - instance: Instance<'tcx>, - ) -> &'ll DIScope { - // First, let's see if this is a method within an inherent impl. Because - // if yes, we want to make the result subroutine DIE a child of the - // subroutine's self-type. - let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), - ); - - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g. `<*mut T>::null`). - match impl_self_ty.sty { - ty::Adt(def, ..) 
if !def.is_box() => { - Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) - } - _ => None - } - } else { - // For trait method impls we still use the "parallel namespace" - // strategy - None - } - }); - - self_type.unwrap_or_else(|| { - namespace::item_namespace(cx, DefId { - krate: instance.def_id().krate, - index: cx.tcx - .def_key(instance.def_id()) - .parent - .expect("get_containing_scope: missing parent?") - }) - }) + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext<'ll>, + ) -> IndexVec> { + create_scope_map::create_mir_scopes(&self, mir, debug_context) } -} -pub fn declare_local( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dbg_context: &FunctionDebugContext<'ll>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: &'ll DIScope, - variable_access: VariableAccess<'_, 'll>, - variable_kind: VariableKind, - span: Span, -) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); - let cx = bx.cx(); - - let file = span_start(cx, span).file; - let file_metadata = file_metadata(cx, - &file.name, - dbg_context.get_ref(span).defining_crate); - - let loc = span_start(cx, span); - let type_metadata = type_metadata(cx, variable_type, span); - - let (argument_index, dwarf_tag) = match variable_kind { - ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), - LocalVariable => (0, DW_TAG_auto_variable) - }; - let align = cx.align_of(variable_type); - - let name = SmallCStr::new(&variable_name.as_str()); - match (variable_access, &[][..]) { - (DirectVariable { alloca }, address_operations) | - (IndirectVariable {alloca, address_operations}, _) => { - let metadata = unsafe { - llvm::LLVMRustDIBuilderCreateVariable( - DIB(cx), - dwarf_tag, - scope_metadata, - name.as_ptr(), - file_metadata, - loc.line as c_uint, - type_metadata, - cx.sess().opts.optimize != config::OptLevel::No, - DIFlags::FlagZero, - argument_index, - align.abi() as u32, - ) - }; - 
source_loc::set_debug_location(bx, - InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); - unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder); - let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( - DIB(cx), - alloca, - metadata, - address_operations.as_ptr(), - address_operations.len() as c_uint, - debug_loc, - bx.llbb()); - - llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr); - } - source_loc::set_debug_location(bx, UnknownLocation); - } + fn extend_scope_to_file( + &self, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> &'ll DILexicalBlock { + metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate) } } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 3f5c13ba1421c..42271d37958c1 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -17,7 +17,7 @@ use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use interfaces::BuilderMethods; +use interfaces::*; use libc::c_uint; use syntax_pos::{Span, Pos}; @@ -42,7 +42,7 @@ pub fn set_source_location( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); + debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span)); let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index bfb0d1cfa6e18..ab23b71b63b44 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -15,6 +15,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, 
Ty}; use value::Value; +use interfaces::*; use rustc::hir; diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index fecf22a3a61f3..e99ec0d1b577c 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -20,6 +20,7 @@ use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; use value::Value; +use interfaces::*; use syntax_pos::{self, Span}; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index dfdbf7290ea7b..dd76295df5931 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -32,25 +32,9 @@ use attributes; use context::CodegenCx; use common; use type_::Type; +use interfaces::*; use value::Value; - -/// Declare a global value. -/// -/// If there’s a value with the same name already declared, the function will -/// return its Value instead. -pub fn declare_global( - cx: &CodegenCx<'ll, '_, &'ll Value>, - name: &str, ty: &'ll Type -) -> &'ll Value { - debug!("declare_global(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { - llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty) - } -} - - /// Declare a function. /// /// If there’s a value with the same name already declared, the function will @@ -112,132 +96,149 @@ fn declare_raw_fn( llfn } +impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + + /// Declare a global value. + /// + /// If there’s a value with the same name already declared, the function will + /// return its Value instead. + fn declare_global( + &self, + name: &str, ty: &'ll Type + ) -> &'ll Value { + debug!("declare_global(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty) + } + } -/// Declare a C ABI function. -/// -/// Only use this for foreign function ABIs and glue. 
For Rust functions use -/// `declare_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_cfn( - cx: &CodegenCx<'ll, '_, &'ll Value>, - name: &str, - fn_type: &'ll Type -) -> &'ll Value { - declare_raw_fn(cx, name, llvm::CCallConv, fn_type) -} + /// Declare a C ABI function. + /// + /// Only use this for foreign function ABIs and glue. For Rust functions use + /// `declare_fn` instead. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_cfn( + &self, + name: &str, + fn_type: &'ll Type + ) -> &'ll Value { + declare_raw_fn(self, name, llvm::CCallConv, fn_type) + } -/// Declare a Rust function. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_fn( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); - let sig = common::ty_fn_sig(cx, fn_type); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + /// Declare a Rust function. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. 
+ fn declare_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); + let sig = common::ty_fn_sig(self, fn_type); + let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + + let fty = FnType::new(self, sig, &[]); + let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); + + if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } - let fty = FnType::new(cx, sig, &[]); - let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx)); + if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { + attributes::unwind(llfn, false); + } - if cx.layout_of(sig.output()).abi.is_uninhabited() { - llvm::Attribute::NoReturn.apply_llfn(Function, llfn); - } + fty.apply_attrs_llfn(llfn); - if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { - attributes::unwind(llfn, false); + llfn } - fty.apply_attrs_llfn(llfn); - llfn -} - - -/// Declare a global with an intention to define it. -/// -/// Use this function when you intend to define a global. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global( - cx: &CodegenCx<'ll, '_, &'ll Value>, - name: &str, - ty: &'ll Type -) -> Option<&'ll Value> { - if get_defined_value(cx, name).is_some() { - None - } else { - Some(declare_global(cx, name, ty)) + /// Declare a global with an intention to define it. + /// + /// Use this function when you intend to define a global. This function will + /// return None if the name already has a definition associated with it. 
In that + /// case an error should be reported to the user, because it usually happens due + /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). + fn define_global( + &self, + name: &str, + ty: &'ll Type + ) -> Option<&'ll Value> { + if self.get_defined_value(name).is_some() { + None + } else { + Some(self.declare_global(name, ty)) + } } -} -/// Declare a private global -/// -/// Use this function when you intend to define a global without a name. -pub fn define_private_global(cx: &CodegenCx<'ll, '_, &'ll Value>, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) + /// Declare a private global + /// + /// Use this function when you intend to define a global without a name. + fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) + } } -} -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_fn( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - if get_defined_value(cx, name).is_some() { - cx.sess().fatal(&format!("symbol `{}` already defined", name)) - } else { - declare_fn(cx, name, fn_type) + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. 
+ fn define_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + if self.get_defined_value(name).is_some() { + self.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + self.declare_fn(name, fn_type) + } } -} -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_fn( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - let llfn = define_fn(cx, name, fn_type); - unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; - llfn -} + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_internal_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + let llfn = self.define_fn(name, fn_type); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn + } -/// Get declared value by name. -pub fn get_declared_value(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { - debug!("get_declared_value(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } -} + /// Get declared value by name. + fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { + debug!("get_declared_value(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) } + } -/// Get defined or externally defined (AvailableExternally linkage) value by -/// name. 
-pub fn get_defined_value(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { - get_declared_value(cx, name).and_then(|val|{ - let declaration = unsafe { - llvm::LLVMIsDeclaration(val) != 0 - }; - if !declaration { - Some(val) - } else { - None - } - }) + /// Get defined or externally defined (AvailableExternally linkage) value by + /// name. + fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { + self.get_declared_value(name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) + } } diff --git a/src/librustc_codegen_llvm/interfaces/abi.rs b/src/librustc_codegen_llvm/interfaces/abi.rs new file mode 100644 index 0000000000000..b41b20f36c211 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/abi.rs @@ -0,0 +1,32 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use abi::FnType; +use rustc::ty::{FnSig, Ty, Instance}; +use super::backend::Backend; +use super::builder::HasCodegen; + +pub trait AbiMethods<'tcx> { + fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>; + fn new_vtable( + &self, + sig: FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>>; + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>; +} + +pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { + fn apply_attrs_callsite( + &self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: ::Value + ); +} diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs new file mode 100644 index 0000000000000..613f36eba5485 --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/asm.rs @@ -0,0 +1,27 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::hir::{InlineAsm, GlobalAsm}; +use mir::place::PlaceRef; +use super::backend::Backend; +use super::builder::HasCodegen; + +pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ + fn codegen_inline_asm( + &self, + ia: &InlineAsm, + outputs: Vec::Value>>, + inputs: Vec<::Value> + ); +} + +pub trait AsmMethods { + fn codegen_global_asm(&self, ga: &GlobalAsm); +} diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 27698e69ca0f7..023acbbe0b505 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -12,7 +12,7 @@ use super::CodegenObject; pub trait Backend { type Value : CodegenObject; - type BasicBlock; + type BasicBlock : Copy; type Type : CodegenObject; type Context; } diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index ac055b36e1693..177ee390f1cfd 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -12,10 +12,14 @@ use common::*; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; -use rustc::session::Session; use builder::MemFlags; use super::backend::Backend; use super::CodegenMethods; +use super::debuginfo::DebugInfoBuilderMethods; +use super::intrinsic::IntrinsicCallMethods; +use super::type_::ArgTypeMethods; +use super::abi::AbiBuilderMethods; +use super::asm::AsmBuilderMethods; use mir::place::PlaceRef; use mir::operand::OperandRef; @@ -27,7 +31,11 @@ pub trait HasCodegen<'a, 'll: 'a, 'tcx :'ll> { type CodegenCx : 'a + CodegenMethods<'ll, 'tcx>; } -pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + + DebugInfoBuilderMethods<'a, 'll, 'tcx> + ArgTypeMethods<'a, 'll, 'tcx> + + AbiBuilderMethods<'a, 'll, 'tcx> + IntrinsicCallMethods<'a, 
'll, 'tcx> + + AsmBuilderMethods<'a, 'll, 'tcx> +{ fn new_block<'b>( cx: &'a Self::CodegenCx, llfn: ::Value, @@ -35,7 +43,6 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { ) -> Self; fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; - fn sess(&self) -> &Session; fn cx(&self) -> &'a Self::CodegenCx; fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; fn llfn(&self) -> ::Value; @@ -614,4 +621,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { val: ::Value, dest_ty: ::Type ) -> ::Value; + + fn delete_basic_block(&self, bb: ::BasicBlock); + fn do_not_inline(&self, llret: ::Value); } diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_llvm/interfaces/consts.rs index 62ea583acbc38..f9f58afa56bac 100644 --- a/src/librustc_codegen_llvm/interfaces/consts.rs +++ b/src/librustc_codegen_llvm/interfaces/consts.rs @@ -10,8 +10,12 @@ use super::Backend; use syntax::symbol::LocalInternedString; +use rustc::ty::layout; +use rustc::mir::interpret::Scalar; +use rustc::mir::interpret::Allocation; +use mir::place::PlaceRef; -pub trait ConstMethods : Backend { +pub trait ConstMethods<'tcx> : Backend { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; fn const_undef(&self, t: Self::Type) -> Self::Value; @@ -51,4 +55,17 @@ pub trait ConstMethods : Backend { fn is_const_integral(&self, v: Self::Value) -> bool; fn is_const_real(&self, v: Self::Value) -> bool; + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: Self::Type, + ) -> Self::Value; + fn from_const_alloc( + &self, + layout: layout::TyLayout<'tcx>, + alloc: &Allocation, + offset: layout::Size, + ) -> PlaceRef<'tcx, Self::Value>; } diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs index b0331ae11ef80..b72deba0040a0 100644 --- 
a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -8,13 +8,60 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc::ty::Ty; +use rustc::ty::{Ty, FnSig}; use super::backend::Backend; +use super::builder::HasCodegen; +use rustc::mir; +use monomorphize::Instance; +use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; +use rustc_data_structures::indexed_vec::IndexVec; +use syntax_pos; +use rustc::hir::def_id::CrateNum; +use syntax::ast::Name; + +pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend { + type DIScope : 'll + Copy; -pub trait DebugInfoMethods<'tcx> : Backend { fn create_vtable_metadata( &self, ty: Ty<'tcx>, vtable: Self::Value, ); + fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: FnSig<'tcx>, + llfn: Self::Value, + mir: &mir::Mir, + ) -> FunctionDebugContext<'ll>; + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext<'ll>, + ) -> IndexVec>; + fn extend_scope_to_file( + &self, + scope_metadata: Self::DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> Self::DIScope; +} + +pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { + fn declare_local( + &self, + dbg_context: &FunctionDebugContext<'ll>, + variable_name: Name, + variable_type: Ty<'tcx>, + scope_metadata: >::DIScope, + variable_access: VariableAccess<'_, ::Value>, + variable_kind: VariableKind, + span: syntax_pos::Span, + ); + fn set_source_location( + &self, + debug_context: &FunctionDebugContext<'ll>, + scope: Option<>::DIScope>, + span: syntax_pos::Span, + ); } diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_llvm/interfaces/declare.rs new file mode 100644 index 0000000000000..3e02ecabca38d --- /dev/null +++ b/src/librustc_codegen_llvm/interfaces/declare.rs @@ -0,0 +1,47 @@ +// Copyright 
2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::Ty; +use super::backend::Backend; + +pub trait DeclareMethods<'tcx> : Backend{ + fn declare_global( + &self, + name: &str, ty: Self::Type + ) -> Self::Value; + fn declare_cfn( + &self, + name: &str, + fn_type: Self::Type + ) -> Self::Value; + fn declare_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + fn define_global( + &self, + name: &str, + ty: Self::Type + ) -> Option; + fn define_private_global(&self, ty: Self::Type) -> Self::Value; + fn define_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + fn define_internal_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + fn get_declared_value(&self, name: &str) -> Option; + fn get_defined_value(&self, name: &str) -> Option; +} diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index 1897fee36e99d..a00b48318566a 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -12,9 +12,17 @@ use std::cell::RefCell; use rustc::util::nodemap::FxHashMap; use rustc::ty::{Ty, self, Instance}; use super::backend::Backend; +use rustc::session::Session; +use libc::c_uint; pub trait MiscMethods<'tcx> : Backend { fn vtables(&self) -> &RefCell, Option>), Self::Value>>; + fn check_overflow(&self) -> bool; + fn instances(&self) -> &RefCell, Self::Value>>; fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; + fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value; + fn eh_personality(&self) -> Self::Value; + fn eh_unwind_resume(&self) -> Self::Value; + fn sess(&self) -> &Session; } diff --git 
a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 91c82fce38e5f..8e8580d11d032 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -16,20 +16,28 @@ mod intrinsic; mod statics; mod misc; mod debuginfo; +mod abi; +mod declare; +mod asm; pub use self::builder::{BuilderMethods, HasCodegen}; pub use self::backend::Backend; pub use self::consts::ConstMethods; -pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; +pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, + LayoutTypeMethods, ArgTypeMethods}; pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; pub use self::statics::StaticMethods; pub use self::misc::MiscMethods; -pub use self::debuginfo::DebugInfoMethods; +pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; +pub use self::abi::{AbiMethods, AbiBuilderMethods}; +pub use self::declare::DeclareMethods; +pub use self::asm::{AsmMethods, AsmBuilderMethods}; use std::fmt; pub trait CodegenMethods<'ll, 'tcx: 'll> : - Backend + TypeMethods<'ll, 'tcx> + MiscMethods<'tcx> + ConstMethods + - StaticMethods<'tcx> + DebugInfoMethods<'tcx> {} + Backend + TypeMethods<'ll, 'tcx> + MiscMethods<'tcx> + ConstMethods<'tcx> + + StaticMethods<'tcx> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + + IntrinsicDeclarationMethods + DeclareMethods<'tcx> + AsmMethods {} pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 1deb25e0c156e..e556465e40be0 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -9,6 +9,7 @@ // except according to those terms. 
use super::backend::Backend; +use super::builder::HasCodegen; use common::TypeKind; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; @@ -16,6 +17,9 @@ use std::cell::RefCell; use rustc::util::nodemap::FxHashMap; use rustc::ty::{Ty, TyCtxt}; use rustc::ty::layout::TyLayout; +use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; +use mir::place::PlaceRef; + pub trait BaseTypeMethods<'a, 'tcx: 'a> : Backend { fn type_void(&self) -> Self::Type; @@ -85,6 +89,9 @@ pub trait DerivedTypeMethods<'tcx> : Backend { pub trait LayoutTypeMethods<'tcx> : Backend { fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; + fn reg_backend_type(&self, ty: &Reg) -> Self::Type; fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool; fn scalar_pair_element_backend_type<'a>( @@ -95,5 +102,20 @@ pub trait LayoutTypeMethods<'tcx> : Backend { ) -> Self::Type; } +pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { + fn store_fn_arg( + &self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, ::Value> + ); + fn store_arg_ty( + &self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: ::Value, + dst: PlaceRef<'tcx, ::Value> + ); + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> ::Type; +} + pub trait TypeMethods<'a, 'tcx: 'a> : BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 6adf03a855257..1a7247a0bb593 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -19,7 +19,6 @@ use mir::operand::{OperandRef, OperandValue}; use base::*; use common::*; use context::CodegenCx; -use declare; use glue; use type_::Type; use type_of::LayoutLlvmExt; @@ -412,7 
+411,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> }, "discriminant_value" => { - args[0].deref(cx).codegen_get_discr(&self, ret_ty) + args[0].deref(cx).codegen_get_discr(self, ret_ty) } name if name.starts_with("simd_") => { @@ -677,8 +676,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let val = match intr.definition { intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(cx, - name, + let f = cx.declare_cfn( name, cx.type_func(&inputs, outputs)); self.call(f, &llargs, None) } @@ -706,7 +704,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let ptr = &self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx))); &self.store(llval, ptr, result.align); } else { - OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout) + OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val.store(self, result); } } @@ -773,11 +771,11 @@ fn try_intrinsic( local_ptr: &'ll Value, dest: &'ll Value, ) { - if bx.sess().no_landing_pads() { + if bx.cx().sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align); - } else if wants_msvc_seh(bx.sess()) { + } else if wants_msvc_seh(bx.cx().sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { codegen_gnu_try(bx, cx, func, data, local_ptr, dest); @@ -975,7 +973,7 @@ fn gen_fn<'ll, 'tcx>( hir::Unsafety::Unsafe, Abi::Rust ))); - let llfn = declare::define_internal_fn(cx, name, rust_fn_ty); + let llfn = cx.define_internal_fn(name, rust_fn_ty); attributes::from_fn_attrs(cx, llfn, None); let bx = Builder::new_block(cx, llfn, "entry-block"); codegen(bx); @@ -1030,7 +1028,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.sess(), span, + bx.cx().sess(), span, &format!(concat!("invalid monomorphization of 
`{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1201,7 +1199,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.sess(), span, + bx.cx().sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1419,7 +1417,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, + let f = bx.cx().declare_cfn(&llvm_intrinsic, bx.cx().type_func(&[ llvm_pointer_vec_ty, alignment_ty, @@ -1521,7 +1519,7 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic, + let f = bx.cx().declare_cfn(&llvm_intrinsic, bx.cx().type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index e7c1c2de155ea..7f638ddef0b53 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -8,11 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, FnTypeExt}; +use abi::FnType; use callee; -use builder::Builder; use monomorphize; -use value::Value; use interfaces::*; @@ -31,15 +29,18 @@ impl<'a, 'tcx> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - llvtable: &'ll Value, - fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { + pub fn get_fn>( + self, + bx: &Bx, + llvtable: ::Value, + fn_ty: &FnType<'tcx, Ty<'tcx>> + ) -> ::Value { // Load the data pointer from the object. 
debug!("get_fn({:?}, {:?})", llvtable, self); let llvtable = bx.pointercast( llvtable, - bx.cx().type_ptr_to(bx.cx().type_ptr_to(fn_ty.llvm_type(bx.cx()))) + bx.cx().type_ptr_to(bx.cx().type_ptr_to(bx.cx().fn_backend_type(fn_ty))) ); let ptr_align = bx.tcx().data_layout.pointer_align; let ptr = bx.load( diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs index ef361bf19c1c3..46e7d3edeb33f 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -17,14 +17,17 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, PlaceContext}; use rustc::mir::traversal; -use rustc::ty; -use rustc::ty::layout::LayoutOf; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use type_of::LayoutLlvmExt; use super::FunctionCx; -use value::Value; use interfaces::*; -pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet { +pub fn non_ssa_locals<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( + fx: &FunctionCx<'a, 'll, 'tcx, Cx> +) -> BitSet + where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let mir = fx.mir; let mut analyzer = LocalAnalyzer::new(fx); @@ -53,8 +56,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet { - fx: &'mir FunctionCx<'a, 'll, 'tcx, V>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { + fx: &'mir FunctionCx<'a, 'll, 'tcx, Cx>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -62,8 +65,10 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> { first_assignment: IndexVec } -impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> Self { +impl> LocalAnalyzer<'mir, 'a, 'll, 'tcx, Cx> + where &'a Cx : 
LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, Cx>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -104,7 +109,10 @@ impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { } } -impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { +impl<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Visitor<'tcx> + for LocalAnalyzer<'mir, 'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fn visit_assign(&mut self, block: mir::BasicBlock, place: &mir::Place<'tcx>, @@ -139,7 +147,7 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { _ => None, }; if let Some((def_id, args)) = check { - if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() { + if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -166,18 +174,18 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { _ => false }; if is_consume { - let base_ty = proj.base.ty(self.fx.mir, cx.tcx); + let base_ty = proj.base.ty(self.fx.mir, *cx.tcx()); let base_ty = self.fx.monomorphize(&base_ty); // ZSTs don't require any actual memory access. - let elem_ty = base_ty.projection_ty(cx.tcx, &proj.elem).to_ty(cx.tcx); + let elem_ty = base_ty.projection_ty(*cx.tcx(), &proj.elem).to_ty(*cx.tcx()); let elem_ty = self.fx.monomorphize(&elem_ty); if cx.layout_of(elem_ty).is_zst() { return; } if let mir::ProjectionElem::Field(..) 
= proj.elem { - let layout = cx.layout_of(base_ty.to_ty(cx.tcx)); + let layout = cx.layout_of(base_ty.to_ty(*cx.tcx())); if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, @@ -236,8 +244,8 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> { } PlaceContext::Drop => { - let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx); - let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx)); + let ty = mir::Place::Local(local).ty(self.fx.mir, *self.fx.cx.tcx()); + let ty = self.fx.monomorphize(&ty.to_ty(*self.fx.cx.tcx())); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index a5ccb3181bc4e..ca7bd793214db 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -8,22 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm::{self, BasicBlock}; use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, TyLayout}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; -use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; +use abi::{Abi, FnType, PassMode}; +use rustc_target::abi::call::ArgType; use base; -use callee; -use builder::{Builder, MemFlags}; +use builder::MemFlags; use common::{self, IntPredicate}; use meth; use monomorphize; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; use interfaces::*; @@ -35,9 +31,14 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { - pub fn codegen_block(&mut self, bb: mir::BasicBlock) { - let mut bx = self.build_block(bb); +impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_block>(&mut self, bb: mir::BasicBlock) + where Bx: BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx> + { + let mut bx : Bx = self.build_block(bb); let data = &self.mir[bb]; debug!("codegen_block({:?}={:?})", bb, data); @@ -49,11 +50,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { self.codegen_terminator(bx, bb, data.terminator()); } - fn codegen_terminator(&mut self, - mut bx: Builder<'a, 'll, 'tcx, &'ll Value>, - bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx>) - { + fn codegen_terminator>( + &mut self, + mut bx: Bx, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx> + ) where Bx: BuilderMethods<'a, 'll, 'tcx, CodegenCx = Cx> { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. 
@@ -75,11 +77,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { => (lltarget, false), (None, Some(_)) => { // jump *into* cleanup - need a landing pad if GNU - (this.landing_pad_to(target), false) + (this.landing_pad_to::(target), false) } (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator), (Some(_), Some(_)) => { - (this.landing_pad_to(target), true) + (this.landing_pad_to::(target), true) } } }; @@ -91,7 +93,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.new_block(name); + let trampoline : Bx = this.new_block(name); trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); trampoline.llbb() } else { @@ -100,7 +102,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; let funclet_br = - |this: &mut Self, bx: Builder<'_, 'll, '_, &'ll Value>, target: mir::BasicBlock| { + |this: &mut Self, bx: &Bx, target: mir::BasicBlock| { let (lltarget, is_cleanupret) = lltarget(this, target); if is_cleanupret { // micro-optimization: generate a `ret` rather than a jump @@ -113,44 +115,44 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let do_call = | this: &mut Self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, - fn_ptr: &'ll Value, - llargs: &[&'ll Value], - destination: Option<(ReturnDest<'tcx, &'ll Value>, mir::BasicBlock)>, + fn_ptr: Cx::Value, + llargs: &[Cx::Value], + destination: Option<(ReturnDest<'tcx, Cx::Value>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { let ret_bx = if let Some((_, target)) = destination { this.blocks[target] } else { - this.unreachable_block() + this.unreachable_block::() }; let invokeret = bx.invoke(fn_ptr, &llargs, ret_bx, llblock(this, cleanup), cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, invokeret); + bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = 
destination { - let ret_bx = this.build_block(target); + let ret_bx = this.build_block::(target); this.set_debug_loc(&ret_bx, terminator.source_info); this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, llret); + bx.apply_attrs_callsite(&fn_ty, llret); if this.mir[bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested // struct, there are "symmetry" issues that cause // exponential inlining - see issue #41696. - llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + bx.do_not_inline(llret); } if let Some((ret_dest, target)) = destination { - this.store_return(&bx, ret_dest, &fn_ty.ret, llret); + this.store_return(bx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bx, target); } else { bx.unreachable(); @@ -165,11 +167,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.cleanup_ret(cleanup_pad, None); } else { let slot = self.get_personality_slot(&bx); - let lp0 = &bx.load_ref(&slot.project_field(&bx, 0)).immediate(); - let lp1 = &bx.load_ref(&slot.project_field(&bx, 1)).immediate(); + let lp0 = bx.load_ref(&slot.project_field(&bx, 0)).immediate(); + let lp1 = bx.load_ref(&slot.project_field(&bx, 1)).immediate(); slot.storage_dead(&bx); - if !bx.sess().target.target.options.custom_unwind_resume { + if !bx.cx().sess().target.target.options.custom_unwind_resume { let mut lp = bx.cx().const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); @@ -189,7 +191,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } mir::TerminatorKind::Goto { target } => { - funclet_br(self, bx, target); + funclet_br(self, &bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { @@ -207,7 +209,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { - let 
switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); + let switch_llty = bx.cx().immediate_backend_type( + &bx.cx().layout_of(switch_ty) + ); let llval = bx.cx().const_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); @@ -217,9 +221,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let switch = bx.switch(discr.immediate(), llblock(self, *otherwise), values.len()); - let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx()); + let switch_llty = bx.cx().immediate_backend_type( + &bx.cx().layout_of(switch_ty) + ); for (&value, target) in values.iter().zip(targets) { - let llval =bx.cx().const_uint_big(switch_llty, value); + let llval = bx.cx().const_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -267,7 +273,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } }; bx.load( - bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))), + bx.pointercast(llslot, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&cast_ty) + )), self.fn_ty.ret.layout.align) } }; @@ -281,11 +289,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx, ty); + let drop_fn = monomorphize::resolve_drop_in_place(*bx.cx().tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - funclet_br(self, bx, target); + funclet_br(self, &bx, target); return } @@ -300,23 +308,23 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) 
=> { - let fn_ty = drop_fn.ty(bx.cx().tcx); + let fn_ty = drop_fn.ty(*bx.cx().tcx()); let sig = common::ty_fn_sig(bx.cx(), fn_ty); let sig = bx.tcx().normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - let fn_ty = FnType::new_vtable(bx.cx(), sig, &[]); + let fn_ty = bx.cx().new_vtable(sig, &[]); let vtable = args[1]; args = &args[..1]; (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) } _ => { - (callee::get_fn(bx.cx(), drop_fn), - FnType::of_instance(bx.cx(), &drop_fn)) + (bx.cx().get_fn(drop_fn), + bx.cx().fn_type_of_instance(&drop_fn)) } }; - do_call(self, bx, fn_ty, drop_fn, args, + do_call(self, &bx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); } @@ -332,7 +340,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. - if !bx.cx().check_overflow { + if !bx.cx().check_overflow() { if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { const_cond = Some(expected); } @@ -340,7 +348,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - funclet_br(self, bx, target); + funclet_br(self, &bx, target); return; } @@ -350,7 +358,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.new_block("panic"); + let panic_block : Bx = self.new_block("panic"); if expected { bx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -362,7 +370,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { self.set_debug_loc(&bx, terminator.source_info); // Get the location information. 
- let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); let filename = bx.cx().const_str_slice(filename); let line = bx.cx().const_u32(loc.line as u32); @@ -374,8 +382,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { EvalErrorKind::BoundsCheck { ref len, ref index } => { - let len = self.codegen_operand(&mut bx, len).immediate(); - let index = self.codegen_operand(&mut bx, index).immediate(); + let len = self.codegen_operand(&bx, len).immediate(); + let index = self.codegen_operand(&bx, index).immediate(); let file_line_col = bx.cx().const_struct(&[filename, line, col], false); let file_line_col = bx.cx().static_addr_of( @@ -407,11 +415,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Obtain the panic entry point. let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx(), &instance); - let llfn = callee::get_fn(bx.cx(), instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. - do_call(self, bx, fn_ty, llfn, &args, None, cleanup); + do_call(self, &bx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. 
} => { @@ -430,7 +438,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.cx().tcx, + (Some(ty::Instance::resolve(*bx.cx().tcx(), ty::ParamEnv::reveal_all(), def_id, substs).unwrap()), @@ -461,7 +469,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; self.codegen_transmute(&bx, &args[0], dest); - funclet_br(self, bx, target); + funclet_br(self, &bx, target); } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -483,15 +491,15 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let fn_ty = match def { Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bx.cx(), sig, &extra_args) + bx.cx().new_vtable(sig, &extra_args) } Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, bx, target); + funclet_br(self, &bx, target); return; } - _ => FnType::new(bx.cx(), sig, &extra_args) + _ => bx.cx().new_fn_type(sig, &extra_args) }; // emit a panic instead of instantiating an uninhabited type @@ -559,7 +567,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx()))) + bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -621,8 +629,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }).collect(); - let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx); - &bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + let callee_ty = instance.as_ref().unwrap().ty(*bx.cx().tcx()); + bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, 
terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -630,7 +638,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } if let Some((_, target)) = *destination { - funclet_br(self, bx, target); + funclet_br(self, &bx, target); } else { bx.unreachable(); } @@ -678,11 +686,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bx.cx(), instance), + (None, Some(instance)) => bx.cx().get_fn(instance), _ => span_bug!(span, "no llfn for call"), }; - do_call(self, bx, fn_ty, fn_ptr, &llargs, + do_call(self, &bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } @@ -693,14 +701,16 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - fn codegen_argument(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - op: OperandRef<'tcx, &'ll Value>, - llargs: &mut Vec<&'ll Value>, - arg: &ArgType<'tcx, Ty<'tcx>>) { + fn codegen_argument>( + &mut self, + bx: &Bx, + op: OperandRef<'tcx, Cx::Value>, + llargs: &mut Vec, + arg: &ArgType<'tcx, Ty<'tcx>> + ) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(bx.cx().const_undef(ty.llvm_type(bx.cx()))); + llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty))) } if arg.is_ignore() { @@ -759,8 +769,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))), - align.min(arg.layout.align)); + llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&ty)) + ), align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -781,11 +792,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { llargs.push(llval); } - fn codegen_arguments_untupled(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - operand: &mir::Operand<'tcx>, - llargs: &mut Vec<&'ll Value>, - args: &[ArgType<'tcx, Ty<'tcx>>]) { + fn codegen_arguments_untupled>( + &mut self, + bx: &Bx, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + args: &[ArgType<'tcx, Ty<'tcx>>] + ) { let tuple = self.codegen_operand(bx, operand); // Handle both by-ref and immediate tuples. @@ -806,17 +819,17 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - fn get_personality_slot( + fn get_personality_slot>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value> - ) -> PlaceRef<'tcx, &'ll Value> { + bx: &Bx + ) -> PlaceRef<'tcx, Cx::Value> where Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx> { let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx.intern_tup(&[ - cx.tcx.mk_mut_ptr(cx.tcx.types.u8), - cx.tcx.types.i32 + let layout = cx.layout_of(cx.tcx().intern_tup(&[ + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + cx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); @@ -827,23 +840,29 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. 
- fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { + fn landing_pad_to>( + &mut self, + target_bb: mir::BasicBlock + ) -> Cx::BasicBlock { if let Some(block) = self.landing_pads[target_bb] { return block; } let block = self.blocks[target_bb]; - let landing_pad = self.landing_pad_uncached(block); + let landing_pad = self.landing_pad_uncached::(block); self.landing_pads[target_bb] = Some(landing_pad); landing_pad } - fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock { + fn landing_pad_uncached>( + &mut self, + target_bb: Cx::BasicBlock + ) -> Cx::BasicBlock { if base::wants_msvc_seh(self.cx.sess()) { span_bug!(self.mir.span, "landing pad was not inserted?") } - let bx = self.new_block("cleanup"); + let bx : Bx = self.new_block("cleanup"); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); @@ -858,34 +877,42 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.llbb() } - fn landing_pad_type(&self) -> &'ll Type { + fn landing_pad_type(&self) -> Cx::Type { let cx = self.cx; - cx.type_struct( &[cx.type_i8p(), cx.type_i32()], false) + cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false) } - fn unreachable_block(&mut self) -> &'ll BasicBlock { + fn unreachable_block>( + &mut self + ) -> Cx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let bl = self.new_block("unreachable"); + let bl : Bx = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() }) } - pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx, &'ll Value> { - Builder::new_block(self.cx, self.llfn, name) + pub fn new_block>(&self, name: &str) -> Bx { + Bx::new_block(self.cx, self.llfn, name) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx, &'ll Value> { - let bx = Builder::with_cx(self.cx); + pub fn build_block>( + &self, + bb: mir::BasicBlock + ) -> Bx { + let bx = Bx::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx 
} - fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, - llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) - -> ReturnDest<'tcx, &'ll Value> { + fn make_return_dest>( + &mut self, + bx: &Bx, + dest: &mir::Place<'tcx>, + fn_ret: &ArgType<'tcx, Ty<'tcx>>, + llargs: &mut Vec, is_intrinsic: bool + ) -> ReturnDest<'tcx, Cx::Value> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret.is_ignore() { return ReturnDest::Nothing; @@ -939,9 +966,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - src: &mir::Operand<'tcx>, - dst: &mir::Place<'tcx>) { + fn codegen_transmute>( + &mut self, + bx: &Bx, + src: &mir::Operand<'tcx>, + dst: &mir::Place<'tcx> + ) { if let mir::Place::Local(index) = *dst { match self.locals[index] { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), @@ -967,11 +997,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - src: &mir::Operand<'tcx>, - dst: PlaceRef<'tcx, &'ll Value>) { + fn codegen_transmute_into>( + &mut self, + bx: &Bx, + src: &mir::Operand<'tcx>, + dst: PlaceRef<'tcx, Cx::Value> + ) { let src = self.codegen_operand(bx, src); - let llty = src.layout.llvm_type(bx.cx()); + let llty = bx.cx().backend_type(&src.layout); let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); @@ -979,16 +1012,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Stores the return value of a function call into it's final location. 
- fn store_return(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: ReturnDest<'tcx, &'ll Value>, - ret_ty: &ArgType<'tcx, Ty<'tcx>>, - llval: &'ll Value) { + fn store_return>( + &mut self, + bx: &Bx, + dest: ReturnDest<'tcx, Cx::Value>, + ret_ty: &ArgType<'tcx, Ty<'tcx>>, + llval: Cx::Value + ) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bx, llval, dst), + Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst), IndirectOperand(tmp, index) => { let op = bx.load_ref(&tmp); tmp.storage_dead(bx); @@ -999,7 +1034,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let op = if let PassMode::Cast(_) = ret_ty.mode { let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); - ret_ty.store(bx, llval, tmp); + bx.store_arg_ty(&ret_ty, llval, tmp); let op = bx.load_ref(&tmp); tmp.storage_dead(bx); op diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index cb0e1de94e3a8..7d7b19b037df1 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -8,83 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm; use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; use rustc_mir::const_eval::const_field; use rustc::hir::def_id::DefId; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::sync::Lrc; -use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; +use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; -use builder::Builder; -use common::{CodegenCx}; -use type_of::LayoutLlvmExt; -use type_::Type; -use syntax::ast::Mutability; +use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, TyLayout, HasTyCtxt}; +use common::CodegenCx; use syntax::source_map::Span; use value::Value; -use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods}; +use interfaces::*; -use super::super::callee; use super::FunctionCx; -pub fn scalar_to_llvm( - cx: &CodegenCx<'ll, '_, &'ll Value>, - cv: Scalar, - layout: &layout::Scalar, - llty: &'ll Type, -) -> &'ll Value { - let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; - match cv { - Scalar::Bits { size: 0, .. 
} => { - assert_eq!(0, layout.value.size(cx).bytes()); - cx.const_undef(cx.type_ix(0)) - }, - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = cx.const_uint_big(cx.type_ix(bitsize), bits); - if layout.value == layout::Pointer { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - cx.static_bitcast(llval, llty) - } - }, - Scalar::Ptr(ptr) => { - let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); - let base_addr = match alloc_type { - Some(AllocType::Memory(alloc)) => { - let init = const_alloc_to_llvm(cx, alloc); - if alloc.mutability == Mutability::Mutable { - cx.static_addr_of_mut(init, alloc.align, None) - } else { - cx.static_addr_of(init, alloc.align, None) - } - } - Some(AllocType::Function(fn_instance)) => { - callee::get_fn(cx, fn_instance) - } - Some(AllocType::Static(def_id)) => { - assert!(cx.tcx.is_static(def_id).is_some()); - cx.get_static(def_id) - } - None => bug!("missing allocation {:?}", ptr.alloc_id), - }; - let llval = unsafe { llvm::LLVMConstInBoundsGEP( - cx.static_bitcast(base_addr, cx.type_i8p()), - &cx.const_usize(ptr.offset.bytes()), - 1, - ) }; - if layout.value != layout::Pointer { - unsafe { llvm::LLVMConstPtrToInt(llval, llty) } - } else { - cx.static_bitcast(llval, llty) - } - } - } -} - pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocation) -> &'ll Value { let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); let layout = cx.data_layout(); @@ -102,8 +41,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati layout.endian, &alloc.bytes[offset..(offset + pointer_size)], ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(scalar_to_llvm( - cx, + llvals.push(cx.scalar_to_backend( Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), &layout::Scalar { value: layout::Primitive::Pointer, @@ -139,10 +77,12 @@ pub fn codegen_static_initializer( 
Ok((const_alloc_to_llvm(cx, alloc), alloc)) } -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { - fn fully_evaluate( +impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn fully_evaluate>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, constant: &'tcx ty::Const<'tcx>, ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { match constant.val { @@ -160,9 +100,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn eval_mir_constant( + pub fn eval_mir_constant>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, constant: &mir::Constant<'tcx>, ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { let c = self.monomorphize(&constant.literal); @@ -170,13 +110,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } /// process constant containing SIMD shuffle indices - pub fn simd_shuffle_indices( + pub fn simd_shuffle_indices>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, span: Span, ty: Ty<'tcx>, constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, - ) -> (&'ll Value, Ty<'tcx>) { + ) -> (Cx::Value, Ty<'tcx>) { constant .and_then(|c| { let field_ty = c.ty.builtin_index().unwrap(); @@ -199,9 +139,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - Ok(scalar_to_llvm( - bx.cx(), prim, scalar, - layout.immediate_llvm_type(bx.cx()), + Ok(bx.cx().scalar_to_backend( + prim, scalar, + bx.cx().immediate_backend_type(&layout), )) } else { bug!("simd shuffle field {:?}", field) @@ -217,7 +157,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { ); // We've errored, so we don't have to produce working code. 
let ty = self.monomorphize(&ty); - let llty = bx.cx().layout_of(ty).llvm_type(bx.cx()); + let llty = bx.cx().backend_type(&bx.cx().layout_of(ty)); (bx.cx().const_undef(llty), ty) }) } diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 5cc8cb41bee88..9f4f2f603aca9 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -9,21 +9,18 @@ // except according to those terms. use libc::c_uint; -use llvm::{self, BasicBlock}; -use llvm::debuginfo::DIScope; +use llvm; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; -use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; use base; -use builder::Builder; -use common::{CodegenCx, Funclet}; -use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; +use common::Funclet; +use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; -use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; -use value::Value; -use interfaces::{BuilderMethods, ConstMethods, DerivedTypeMethods}; +use abi::{FnType, PassMode}; +use interfaces::*; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -42,16 +39,16 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> { +pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, debug_context: FunctionDebugContext<'ll>, - llfn: V, + llfn: Cx::Value, - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + cx: &'a Cx, fn_ty: FnType<'tcx, Ty<'tcx>>, @@ -62,10 +59,10 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> { /// don't really care about it very much. 
Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec, + blocks: IndexVec, /// The funclet status of each basic block cleanup_kinds: IndexVec, @@ -73,14 +70,14 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> { /// When targeting MSVC, this stores the cleanup info for each funclet /// BB. Thisrustup component add rustfmt-preview is initialized as we compute the funclets' /// head block in RPO. - funclets: &'a IndexVec>>, + funclets: &'a IndexVec>>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. - landing_pads: IndexVec>, + landing_pads: IndexVec>, /// Cached unreachable block - unreachable_block: Option<&'ll BasicBlock>, + unreachable_block: Option, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: @@ -97,36 +94,42 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec>, + scopes: IndexVec>, /// If this function is being monomorphized, this contains the type substitutions used. 
param_substs: &'tcx Substs<'tcx>, } -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'll, 'tcx, Cx> +{ pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { - self.cx.tcx.subst_and_normalize_erasing_regions( + self.cx.tcx().subst_and_normalize_erasing_regions( self.param_substs, ty::ParamEnv::reveal_all(), value, ) } +} - pub fn set_debug_loc( +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'll, 'tcx, Cx> +{ + pub fn set_debug_loc>( &mut self, - bx: &Builder<'_, 'll, '_, &'ll Value>, + bx: &Bx, source_info: mir::SourceInfo - ) { + ) where Bx::CodegenCx : DebugInfoMethods<'ll, 'tcx, DIScope = Cx::DIScope> { let (scope, span) = self.debug_loc(source_info); - debuginfo::set_source_location(&self.debug_context, bx, scope, span); + bx.set_source_location(&self.debug_context, scope, span); } - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) { + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option, Span) { // Bail out if debug info emission is not enabled. match self.debug_context { FunctionDebugContext::DebugInfoDisabled | @@ -166,16 +169,17 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // corresponding to span's containing source scope. If so, we need to create a DIScope // "extension" into that file. 
fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos) - -> Option<&'ll DIScope> { + -> Option { let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { let cm = self.cx.sess().source_map(); let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate; - Some(debuginfo::extend_scope_to_file(self.cx, - scope_metadata.unwrap(), - &cm.lookup_char_pos(pos).file, - defining_crate)) + Some(self.cx.extend_scope_to_file( + scope_metadata.unwrap(), + &cm.lookup_char_pos(pos).file, + defining_crate + )) } else { scope_metadata } @@ -192,11 +196,11 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl LocalRef<'tcx, &'ll Value> { - fn new_operand( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, +impl<'ll, 'tcx: 'll, V : CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + cx: &Cx, layout: TyLayout<'tcx> - ) -> LocalRef<'tcx, &'ll Value> { + ) -> LocalRef<'tcx, V> where Cx: Backend { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -210,18 +214,18 @@ impl LocalRef<'tcx, &'ll Value> { /////////////////////////////////////////////////////////////////////////// -pub fn codegen_mir( - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, - llfn: &'ll Value, +pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + llfn: ::Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, -) { - let fn_ty = FnType::new(cx, sig, &[]); +) where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir); - let bx = Builder::new_block(cx, llfn, "start"); + cx.create_function_debug_context(instance, sig, llfn, mir); + let bx = Bx::new_block(cx, llfn, 
"start"); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); @@ -231,7 +235,7 @@ pub fn codegen_mir( // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); - let block_bxs: IndexVec = + let block_bxs: IndexVec::BasicBlock> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK && !reentrant_start_block { bx.llbb() @@ -241,7 +245,7 @@ pub fn codegen_mir( }).collect(); // Compute debuginfo scopes from MIR scopes. - let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context); + let scopes = cx.create_mir_scopes(mir, &debug_context); let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { @@ -279,7 +283,7 @@ pub fn codegen_mir( if let Some(name) = decl.name { // User variable let debug_scope = fx.scopes[decl.visibility_scope]; - let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full; + let dbg = debug_scope.is_valid() && bx.cx().sess().opts.debuginfo == DebugInfo::Full; if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); @@ -299,7 +303,7 @@ pub fn codegen_mir( span: decl.source_info.span, scope: decl.visibility_scope, }); - declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(), + bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(), VariableAccess::DirectVariable { alloca: place.llval }, VariableKind::LocalVariable, span); } @@ -309,7 +313,7 @@ pub fn codegen_mir( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = llvm::get_param(llfn, 0); + let llretptr = fx.cx.get_param(llfn, 0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> 
place", local); @@ -353,7 +357,7 @@ pub fn codegen_mir( // Codegen the body of each block using reverse postorder for (bb, _) in rpo { visited.insert(bb.index()); - fx.codegen_block(bb); + fx.codegen_block::(bb); } // Remove blocks that haven't been visited, or have no @@ -362,24 +366,22 @@ pub fn codegen_mir( // Unreachable block if !visited.contains(bb.index()) { debug!("codegen_mir: block {:?} was not visited", bb); - unsafe { - llvm::LLVMDeleteBasicBlock(fx.blocks[bb]); - } + bx.delete_basic_block(fx.blocks[bb]); } } } -fn create_funclets( +fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( mir: &'a Mir<'tcx>, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, cleanup_kinds: &IndexVec, - block_bxs: &IndexVec) - -> (IndexVec>, - IndexVec>>) + block_bxs: &IndexVec::BasicBlock>) + -> (IndexVec::BasicBlock>>, + IndexVec::Value>>>) { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { - CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {} + CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {} _ => return (None, None) } @@ -438,12 +440,17 @@ fn create_funclets( /// Produce, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. -fn arg_local_refs( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>, - scopes: &IndexVec>, +fn arg_local_refs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + fx: &FunctionCx<'a, 'll, 'tcx, Bx::CodegenCx>, + scopes: &IndexVec< + mir::SourceScope, + debuginfo::MirDebugScope<>::DIScope> + >, memory_locals: &BitSet, -) -> Vec> { +) -> Vec::Value>> + where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let mir = fx.mir; let tcx = bx.tcx(); let mut idx = 0; @@ -451,7 +458,7 @@ fn arg_local_refs( // Get the argument scope, if it exists and if we need it. 
let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; - let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { + let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { None @@ -485,7 +492,7 @@ fn arg_local_refs( if arg.pad.is_some() { llarg_idx += 1; } - arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i)); + bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i)); } // Now that we have one alloca that contains the aggregate value, @@ -494,8 +501,7 @@ fn arg_local_refs( let variable_access = VariableAccess::DirectVariable { alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, @@ -524,18 +530,18 @@ fn arg_local_refs( return local(OperandRef::new_zst(bx.cx(), arg.layout)); } PassMode::Direct(_) => { - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -552,16 +558,16 @@ fn arg_local_refs( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. 
// FIXME: lifetimes - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); @@ -570,7 +576,7 @@ fn arg_local_refs( tmp } else { let tmp = PlaceRef::alloca(bx, arg.layout, &name); - arg.store_fn_arg(bx, &mut llarg_idx, tmp); + bx.store_fn_arg(arg, &mut llarg_idx, tmp); tmp }; arg_scope.map(|scope| { @@ -584,8 +590,7 @@ fn arg_local_refs( alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg.layout.ty, @@ -657,8 +662,7 @@ fn arg_local_refs( alloca: env_ptr, address_operations: &ops }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, decl.debug_name, ty, @@ -679,7 +683,7 @@ fn arg_local_refs( mod analyze; mod block; -mod constant; +pub mod constant; pub mod place; pub mod operand; mod rvalue; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 4aa7427b667a7..df441b6dd536b 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -11,14 +11,12 @@ use rustc::mir::interpret::{ConstValue, ConstEvalErr}; use rustc::mir; use rustc::ty; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasTyCtxt}; use rustc_data_structures::sync::Lrc; use base; 
-use common::CodegenCx; use builder::{Builder, MemFlags}; use value::Value; -use type_of::LayoutLlvmExt; use glue; use interfaces::*; @@ -26,7 +24,6 @@ use interfaces::*; use std::fmt; use super::{FunctionCx, LocalRef}; -use super::constant::scalar_to_llvm; use super::place::PlaceRef; /// The representation of a Rust value. The enum variant is in fact @@ -62,13 +59,13 @@ pub struct OperandRef<'tcx, V> { pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'tcx, &'ll Value> { +impl fmt::Debug for OperandRef<'tcx, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } -impl<'ll, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { pub fn new_zst>( cx: &Cx, layout: TyLayout<'tcx> @@ -79,12 +76,14 @@ impl<'ll, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { layout } } -} -impl OperandRef<'tcx, &'ll Value> { - pub fn from_const(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - val: &'tcx ty::Const<'tcx>) - -> Result, Lrc>> { + pub fn from_const>( + bx: &Bx, + val: &'tcx ty::Const<'tcx> + ) -> Result, Lrc>> where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let layout = bx.cx().layout_of(val.ty); if layout.is_zst() { @@ -98,11 +97,10 @@ impl OperandRef<'tcx, &'ll Value> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = scalar_to_llvm( - bx.cx(), + let llval = bx.cx().scalar_to_backend( x, scalar, - layout.immediate_llvm_type(bx.cx()), + bx.cx().immediate_backend_type(&layout), ); OperandValue::Immediate(llval) }, @@ -111,13 +109,12 @@ impl OperandRef<'tcx, &'ll Value> { layout::Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = scalar_to_llvm( - bx.cx(), + let a_llval = bx.cx().scalar_to_backend( a, a_scalar, - 
layout.scalar_pair_element_llvm_type(bx.cx(), 0, true), + bx.cx().scalar_pair_element_backend_type(&layout, 0, true), ); - let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true); + let b_layout = bx.cx().scalar_pair_element_backend_type(&layout, 1, true); let b_llval = scalar_to_llvm( bx.cx(), b, @@ -127,7 +124,7 @@ impl OperandRef<'tcx, &'ll Value> { OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(_, alloc, offset) => { - return Ok(bx.load_ref(&PlaceRef::from_const_alloc(bx, layout, alloc, offset))); + return Ok(bx.load_ref(&bx.cx().from_const_alloc(layout, alloc, offset))); }, }; @@ -136,17 +133,25 @@ impl OperandRef<'tcx, &'ll Value> { layout }) } +} +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. - pub fn immediate(self) -> &'ll Value { + pub fn immediate(self) -> V { match self.val { OperandValue::Immediate(s) => s, _ => bug!("not immediate: {:?}", self) } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> PlaceRef<'tcx, &'ll Value> { + pub fn deref>( + self, + cx: &'a Cx + ) -> PlaceRef<'tcx, V> where + Cx: Backend, + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -165,9 +170,12 @@ impl OperandRef<'tcx, &'ll Value> { /// If this operand is a `Pair`, we return an aggregate with the two values. /// For other cases, see `immediate`. 
- pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) -> &'ll Value { + pub fn immediate_or_packed_pair>( + self, + bx: &Bx + ) -> V where Bx::CodegenCx : Backend { if let OperandValue::Pair(a, b) = self.val { - let llty = self.layout.llvm_type(bx.cx()); + let llty = bx.cx().backend_type(&self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. @@ -181,10 +189,13 @@ impl OperandRef<'tcx, &'ll Value> { } /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. - pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - llval: &'ll Value, - layout: TyLayout<'tcx>) - -> OperandRef<'tcx, &'ll Value> { + pub fn from_immediate_or_packed_pair>( + bx: &Bx, + llval: ::Value, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, ::Value> + where Bx::CodegenCx : Backend + { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); @@ -199,10 +210,13 @@ impl OperandRef<'tcx, &'ll Value> { OperandRef { val, layout } } - pub fn extract_field( - &self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + pub fn extract_field>( + &self, bx: &Bx, i: usize - ) -> OperandRef<'tcx, &'ll Value> { + ) -> OperandRef<'tcx, ::Value> where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); @@ -244,11 +258,11 @@ impl OperandRef<'tcx, &'ll Value> { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
match val { OperandValue::Immediate(ref mut llval) => { - *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx())); + *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(&field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true)); - *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true)); + *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(&field, 0, true)); + *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(&field, 1, true)); } OperandValue::Ref(..) => bug!() } @@ -333,14 +347,14 @@ impl OperandValue { } } } -} - -impl OperandValue<&'ll Value> { - pub fn store_unsized( + pub fn store_unsized<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - indirect_dest: PlaceRef<'tcx, &'ll Value> - ) { + bx: &Bx, + indirect_dest: PlaceRef<'tcx, V> + ) where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); let flags = MemFlags::empty(); @@ -370,11 +384,13 @@ impl OperandValue<&'ll Value> { } } -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { - fn maybe_codegen_consume_direct(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - place: &mir::Place<'tcx>) - -> Option> +impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> { + fn maybe_codegen_consume_direct>( + &mut self, + bx: &Bx, + place: &mir::Place<'tcx> + ) -> Option> where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("maybe_codegen_consume_direct(place={:?})", place); @@ -419,10 +435,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { None } - pub fn codegen_consume(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - place: &mir::Place<'tcx>) - -> OperandRef<'tcx, &'ll Value> + pub fn codegen_consume>( + &mut self, 
+ bx: &Bx, + place: &mir::Place<'tcx> + ) -> OperandRef<'tcx, Cx::Value> where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("codegen_consume(place={:?})", place); @@ -443,10 +461,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { bx.load_ref(&self.codegen_place(bx, place)) } - pub fn codegen_operand(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - operand: &mir::Operand<'tcx>) - -> OperandRef<'tcx, &'ll Value> + pub fn codegen_operand>( + &mut self, + bx: &Bx, + operand: &mir::Operand<'tcx> + ) -> OperandRef<'tcx, Cx::Value> where + Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("codegen_operand(operand={:?})", operand); @@ -472,7 +493,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // We've errored, so we don't have to produce working code. let layout = bx.cx().layout_of(ty); bx.load_ref(&PlaceRef::new_sized( - bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))), + bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(&layout))), layout, layout.align, )) diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 488ce90f17f35..c11fc9bcffd53 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -8,17 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm::LLVMConstInBoundsGEP; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, HasTyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use builder::Builder; -use common::{CodegenCx, IntPredicate}; +use common::IntPredicate; use type_of::LayoutLlvmExt; -use value::Value; use glue; -use mir::constant::const_alloc_to_llvm; use interfaces::*; @@ -40,12 +36,12 @@ pub struct PlaceRef<'tcx, V> { pub align: Align, } -impl PlaceRef<'tcx, &'ll Value> { +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized( - llval: &'ll Value, + llval: V, layout: TyLayout<'tcx>, align: Align, - ) -> PlaceRef<'tcx, &'ll Value> { + ) -> PlaceRef<'tcx, V> { assert!(!layout.is_unsized()); PlaceRef { llval, @@ -55,46 +51,37 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn from_const_alloc( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + pub fn alloca>( + bx: &Bx, layout: TyLayout<'tcx>, - alloc: &mir::interpret::Allocation, - offset: Size, - ) -> PlaceRef<'tcx, &'ll Value> { - let init = const_alloc_to_llvm(bx.cx(), alloc); - let base_addr = bx.cx().static_addr_of(init, layout.align, None); - - let llval = unsafe { LLVMConstInBoundsGEP( - bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()), - &bx.cx().const_usize(offset.bytes()), - 1, - )}; - let llval = bx.cx().static_bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))); - PlaceRef::new_sized(llval, layout, alloc.align) - } - - pub fn alloca(bx: &Builder<'a, 'll, 'tcx, &'ll Value>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'tcx, &'ll Value> { + name: &str + ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align); + let tmp = bx.alloca(bx.cx().backend_type(&layout), name, layout.align); 
Self::new_sized(tmp, layout, layout.align) } /// Returns a place for an indirect reference to an unsized place. - pub fn alloca_unsized_indirect( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + pub fn alloca_unsized_indirect>( + bx: &Bx, layout: TyLayout<'tcx>, name: &str - ) -> PlaceRef<'tcx, &'ll Value> { + ) -> PlaceRef<'tcx, V> + where &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx>, + Bx::CodegenCx : Backend + { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty); + let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); let ptr_layout = bx.cx().layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } - pub fn len(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Value { + pub fn len>( + &self, + cx: &Cx + ) -> V where Cx : Backend { if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); @@ -220,17 +207,17 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { align: effective_field_align, } } -} - -impl PlaceRef<'tcx, &'ll Value> { /// Obtain the actual discriminant of a value. 
- pub fn codegen_get_discr( + pub fn codegen_get_discr>( self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, cast_to: Ty<'tcx> - ) -> &'ll Value { - let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx()); + ) -> V where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let cast_to = bx.cx().immediate_backend_type(&bx.cx().layout_of(cast_to)); if self.layout.abi.is_uninhabited() { return bx.cx().const_undef(cast_to); } @@ -238,7 +225,7 @@ impl PlaceRef<'tcx, &'ll Value> { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, - |def| def.discriminant_for_variant(bx.cx().tcx, index).val); + |def| def.discriminant_for_variant(*bx.cx().tcx(), index).val); return bx.cx().const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | @@ -266,7 +253,7 @@ impl PlaceRef<'tcx, &'ll Value> { niche_start, .. } => { - let niche_llty = discr.layout.immediate_llvm_type(bx.cx()); + let niche_llty = bx.cx().immediate_backend_type(&discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { @@ -293,7 +280,14 @@ impl PlaceRef<'tcx, &'ll Value> { /// Set the discriminant for a new value of the given case of the given /// representation. 
- pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) { + pub fn codegen_set_discr>( + &self, + bx: &Bx, + variant_index: usize + ) where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { return; } @@ -307,7 +301,7 @@ impl PlaceRef<'tcx, &'ll Value> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to), + bx.cx().const_uint_big(bx.cx().backend_type(&ptr.layout), to), ptr.llval, ptr.align); } @@ -318,8 +312,8 @@ impl PlaceRef<'tcx, &'ll Value> { .. } => { if variant_index != dataful_variant { - if bx.sess().target.target.arch == "arm" || - bx.sess().target.target.arch == "aarch64" { + if bx.cx().sess().target.target.arch == "arm" || + bx.cx().sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. let llptr = bx.pointercast( @@ -334,7 +328,7 @@ impl PlaceRef<'tcx, &'ll Value> { } let niche = self.project_field(bx, 0); - let niche_llty = niche.layout.immediate_llvm_type(bx.cx()); + let niche_llty = bx.cx().immediate_backend_type(&niche.layout); let niche_value = ((variant_index - *niche_variants.start()) as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. 
@@ -349,9 +343,17 @@ impl PlaceRef<'tcx, &'ll Value> { } } } +} - pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, llindex: &'ll Value) - -> PlaceRef<'tcx, &'ll Value> { +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { + pub fn project_index>( + &self, + bx: &Bx, + llindex: V + ) -> PlaceRef<'tcx, V> where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { PlaceRef { llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, @@ -360,36 +362,51 @@ impl PlaceRef<'tcx, &'ll Value> { } } - pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, variant_index: usize) - -> PlaceRef<'tcx, &'ll Value> { + pub fn project_downcast>( + &self, + bx: &Bx, + variant_index: usize + ) -> PlaceRef<'tcx, V> where + Bx::CodegenCx : Backend, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let mut downcast = *self; downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. 
- let variant_ty = downcast.layout.llvm_type(bx.cx()); + let variant_ty = bx.cx().backend_type(&downcast.layout); downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); downcast } +} - pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) { +impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { + pub fn storage_live>(&self, bx: &Bx) + where Bx::CodegenCx : Backend + { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>) { + pub fn storage_dead>(&self, bx: &Bx) + where Bx::CodegenCx : Backend + { bx.lifetime_end(self.llval, self.layout.size); } } -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { - pub fn codegen_place(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - place: &mir::Place<'tcx>) - -> PlaceRef<'tcx, &'ll Value> { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_place>( + &mut self, + bx: &Bx, + place: &mir::Place<'tcx> + ) -> PlaceRef<'tcx, Cx::Value> { debug!("codegen_place(place={:?})", place); let cx = bx.cx(); - let tcx = cx.tcx; + let tcx = cx.tcx(); if let mir::Place::Local(index) = *place { match self.locals[index] { @@ -397,7 +414,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { return place; } LocalRef::UnsizedPlace(place) => { - return bx.load_ref(&place).deref(&cx); + return bx.load_ref(&place).deref(cx); } LocalRef::Operand(..) 
=> { bug!("using operand local {:?} as place", place); @@ -417,7 +434,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(_, alloc, offset) => { - PlaceRef::from_const_alloc(bx, layout, alloc, offset) + bx.cx().from_const_alloc(layout, alloc, offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -429,7 +446,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); let llval = bx.cx().const_undef( - bx.cx().type_ptr_to(layout.llvm_type(bx.cx())) + bx.cx().type_ptr_to(bx.cx().backend_type(&layout)) ); PlaceRef::new_sized(llval, layout, layout.align) } @@ -478,7 +495,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } - .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); + .projection_ty(*tcx, &projection.elem).to_ty(bx.tcx()); subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { @@ -489,7 +506,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bx.pointercast(subslice.llval, - bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx()))); + bx.cx().type_ptr_to(bx.cx().backend_type(&subslice.layout))); subslice } @@ -504,8 +521,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { - let tcx = self.cx.tcx; - let place_ty = place.ty(self.mir, tcx); - self.monomorphize(&place_ty.to_ty(tcx)) + let tcx = self.cx.tcx(); + let place_ty = place.ty(self.mir, *tcx); + self.monomorphize(&place_ty.to_ty(*tcx)) } } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 34120b9b59ae5..25228a09907f6 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -10,20 +10,17 @@ use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, TyLayout}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; use std::{u128, i128}; use base; -use builder::Builder; use callee; use common::{self, IntPredicate, RealPredicate}; use monomorphize; -use type_::Type; use type_of::LayoutLlvmExt; -use value::Value; use interfaces::*; @@ -31,13 +28,15 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { - pub fn codegen_rvalue(&mut self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, - dest: PlaceRef<'tcx, &'ll Value>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx, &'ll Value> - { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_rvalue>( + &mut self, + bx: Bx, + dest: PlaceRef<'tcx, Cx::Value>, + rvalue: &mir::Rvalue<'tcx> + ) -> Bx { 
debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue); @@ -177,11 +176,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn codegen_rvalue_unsized(&mut self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, - indirect_dest: PlaceRef<'tcx, &'ll Value>, + pub fn codegen_rvalue_unsized>(&mut self, + bx: Bx, + indirect_dest: PlaceRef<'tcx, Cx::Value>, rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx, &'ll Value> + -> Bx { debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue); @@ -197,11 +196,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn codegen_rvalue_operand( + pub fn codegen_rvalue_operand>( &mut self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, + bx: Bx, rvalue: &mir::Rvalue<'tcx> - ) -> (Builder<'a, 'll, 'tcx, &'ll Value>, OperandRef<'tcx, &'ll Value>) { + ) -> (Bx, OperandRef<'tcx, Cx::Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { @@ -214,7 +213,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") { + if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } @@ -230,8 +229,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(callee::get_fn(bx.cx(), instance)) + *bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.cx().get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -254,7 +253,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. 
let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx(), 0, true)); + bx.cx().scalar_pair_element_backend_type(&cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -273,12 +272,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { if let OperandValue::Pair(data_ptr, meta) = operand.val { if cast.is_llvm_scalar_pair() { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx(), 0, true)); + bx.cx().scalar_pair_element_backend_type(&cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = cast.immediate_llvm_type(bx.cx()); + let llcast_ty = bx.cx().immediate_backend_type(&cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -288,7 +287,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } mir::CastKind::Misc => { assert!(cast.is_llvm_immediate()); - let ll_t_out = cast.immediate_llvm_type(bx.cx()); + let ll_t_out = bx.cx().immediate_backend_type(&cast); if operand.layout.abi.is_uninhabited() { let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); return (bx, OperandRef { @@ -299,12 +298,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = operand.layout.immediate_llvm_type(bx.cx()); + let ll_t_in = bx.cx().immediate_backend_type(&operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx().tcx, index) + .discriminant_for_variant(*bx.cx().tcx(), index) .val; let discr = bx.cx().const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { @@ -366,7 +365,7 @@ impl 
FunctionCx<'a, 'll, 'tcx, &'ll Value> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed); + let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => @@ -400,8 +399,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }; (bx, OperandRef { val, - layout: self.cx.layout_of(self.cx.tcx.mk_ref( - self.cx.tcx.types.re_erased, + layout: self.cx.layout_of(self.cx.tcx().mk_ref( + self.cx.tcx().types.re_erased, ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } )), }) @@ -488,7 +487,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bx.cx().type_is_sized(ty)); - let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes()); + let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -498,21 +497,21 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx().size_and_align_of(content_ty); + let (size, align) = bx.cx().layout_of(content_ty).size_and_align(); let llsize = bx.cx().const_usize(size.bytes()); let llalign = bx.cx().const_usize(align.abi()); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = box_layout.llvm_type(bx.cx()); + let llty_ptr = bx.cx().backend_type(&box_layout); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = callee::get_fn(bx.cx(), instance); + let r = 
bx.cx().get_fn(instance); let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); let operand = OperandRef { @@ -529,41 +528,41 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::Rvalue::Aggregate(..) => { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, *self.cx.tcx()); (bx, OperandRef::new_zst(self.cx, self.cx.layout_of(self.monomorphize(&ty)))) } } } - fn evaluate_array_len( + fn evaluate_array_len>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, place: &mir::Place<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { // ZST are passed as operands and require special handling // because codegen_place() panics if Local is operand. if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx().tcx); + let n = n.unwrap_usize(*bx.cx().tcx()); return bx.cx().const_usize(n); } } } // use common size calculation for non zero-sized types - let cg_value = self.codegen_place(&bx, place); + let cg_value = self.codegen_place(bx, place); return cg_value.len(bx.cx()); } - pub fn codegen_scalar_binop( + pub fn codegen_scalar_binop>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, + lhs: Cx::Value, + rhs: Cx::Value, input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_unit = input_ty.is_unit(); @@ -624,16 +623,16 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn codegen_fat_ptr_binop( + pub fn codegen_fat_ptr_binop>( &mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &Bx, op: mir::BinOp, - lhs_addr: &'ll Value, - lhs_extra: &'ll Value, - rhs_addr: &'ll Value, - rhs_extra: &'ll Value, + lhs_addr: Cx::Value, + lhs_extra: Cx::Value, + 
rhs_addr: Cx::Value, + rhs_extra: Cx::Value, _input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { match op { mir::BinOp::Eq => { bx.and( @@ -672,17 +671,19 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { } } - pub fn codegen_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, - input_ty: Ty<'tcx>) -> OperandValue<&'ll Value> { + pub fn codegen_scalar_checked_binop>( + &mut self, + bx: &Bx, + op: mir::BinOp, + lhs: Cx::Value, + rhs: Cx::Value, + input_ty: Ty<'tcx> + ) -> OperandValue { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx().check_overflow { + if !bx.cx().check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); return OperandValue::Pair(val, bx.cx().const_bool(false)); } @@ -705,7 +706,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { mir::BinOp::Shl | mir::BinOp::Shr => { let lhs_llty = bx.cx().val_ty(lhs); let rhs_llty = bx.cx().val_ty(rhs); - let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); + let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); @@ -720,7 +721,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { OperandValue::Pair(val, of) } +} +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | @@ -735,7 +740,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) 
=> { - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, *self.cx.tcx()); let ty = self.monomorphize(&ty); self.cx.layout_of(ty).is_zst() } @@ -750,11 +755,11 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic( +fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( oop: OverflowOp, - bx: &Builder<'_, 'll, '_, &'ll Value>, + bx: &Bx, ty: Ty -) -> &'ll Value { +) -> ::Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -819,11 +824,13 @@ fn get_overflow_intrinsic( bx.cx().get_intrinsic(&name) } -fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, - signed: bool, - x: &'ll Value, - int_ty: &'ll Type, - float_ty: &'ll Type) -> &'ll Value { +fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + signed: bool, + x: ::Value, + int_ty: ::Type, + float_ty: ::Type +) -> ::Value { // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. 
@@ -851,18 +858,20 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_, &'ll Value>, } } -fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, - signed: bool, - x: &'ll Value, - float_ty: &'ll Type, - int_ty: &'ll Type) -> &'ll Value { +fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + signed: bool, + x: ::Value, + float_ty: ::Type, + int_ty: ::Type +) -> ::Value { let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) }; - if !bx.sess().opts.debugging_opts.saturating_float_casts { + if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the @@ -884,7 +893,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
- let int_max = |signed: bool, int_ty: &'ll Type| -> u128 { + let int_max = |signed: bool, int_ty: ::Type| -> u128 { let shift_amount = 128 - bx.cx().int_width(int_ty); if signed { i128::MAX as u128 >> shift_amount @@ -892,7 +901,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, u128::MAX >> shift_amount } }; - let int_min = |signed: bool, int_ty: &'ll Type| -> i128 { + let int_min = |signed: bool, int_ty: ::Type| -> i128 { if signed { i128::MIN >> (128 - bx.cx().int_width(int_ty)) } else { @@ -900,14 +909,16 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_, &'ll Value>, } }; - let compute_clamp_bounds_single = |signed: bool, int_ty: &'ll Type| -> (u128, u128) { + let compute_clamp_bounds_single = + |signed: bool, int_ty: ::Type| -> (u128, u128) { let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero); assert!(rounded_max.value.is_finite()); (rounded_min.value.to_bits(), rounded_max.value.to_bits()) }; - let compute_clamp_bounds_double = |signed: bool, int_ty: &'ll Type| -> (u128, u128) { + let compute_clamp_bounds_double = + |signed: bool, int_ty: ::Type| -> (u128, u128) { let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero); diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs index 4e39fda1cce9c..087ceee2cbd92 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -10,19 +10,22 @@ use rustc::mir; -use asm; -use builder::Builder; - use super::FunctionCx; use super::LocalRef; use super::OperandValue; +use rustc::ty::Ty; +use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; +use interfaces::*; use value::Value; -impl 
FunctionCx<'a, 'll, 'tcx, &'ll Value> { - pub fn codegen_statement(&mut self, - bx: Builder<'a, 'll, 'tcx, &'ll Value>, - statement: &mir::Statement<'tcx>) - -> Builder<'a, 'll, 'tcx, &'ll Value> { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_statement>( + &mut self, + bx: Bx, + statement: &mir::Statement<'tcx> + ) -> Bx { debug!("codegen_statement(statement={:?})", statement); self.set_debug_loc(&bx, statement.source_info); @@ -96,13 +99,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> { }); if input_vals.is_err() { - span_err!(bx.sess(), statement.source_info.span, E0669, + span_err!(bx.cx.sess(), statement.source_info.span, E0669, "invalid value for constraint in inline assembly"); } else { let input_vals = input_vals.unwrap(); let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals); if !res { - span_err!(bx.sess(), statement.source_info.span, E0668, + span_err!(bx.cx.sess(), statement.source_info.span, E0668, "malformed inline assembly"); } } diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index 169bb6df73d30..cb0a4dfecc301 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -14,11 +14,9 @@ //! item-path. This is used for unit testing the code that generates //! paths etc in all kinds of annoying scenarios. 
-use asm; use attributes; use base; use context::CodegenCx; -use declare; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; @@ -30,7 +28,7 @@ use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; use std::fmt; use value::Value; -use interfaces::StaticMethods; +use interfaces::*; pub use rustc::mir::mono::MonoItem; @@ -60,7 +58,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { MonoItem::GlobalAsm(node_id) => { let item = cx.tcx.hir.expect_item(node_id); if let hir::ItemKind::GlobalAsm(ref ga) = item.node { - asm::codegen_global_asm(cx, ga); + cx.codegen_global_asm(ga); } else { span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") } @@ -133,7 +131,7 @@ fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, let ty = instance.ty(cx.tcx); let llty = cx.layout_of(ty).llvm_type(cx); - let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| { + let g = cx.define_global(symbol_name, llty).unwrap_or_else(|| { cx.sess().span_fatal(cx.tcx.def_span(def_id), &format!("symbol `{}` is already defined", symbol_name)) }); @@ -156,7 +154,7 @@ fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, let mono_ty = instance.ty(cx.tcx); let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = declare::declare_fn(cx, symbol_name, mono_ty); + let lldecl = cx.declare_fn(symbol_name, mono_ty); unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; base::set_link_section(lldecl, &attrs); if linkage == Linkage::LinkOnceODR || diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 4be9beaa3d3f5..d8be1ad686030 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -24,9 +24,11 @@ use rustc::ty::layout::{self, Align, Size}; use rustc::util::nodemap::FxHashMap; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::TyLayout; +use rustc_target::abi::call::{CastTarget, FnType, Reg}; 
use rustc_data_structures::small_c_str::SmallCStr; use common::{self, TypeKind}; use type_of::LayoutLlvmExt; +use abi::{LlvmType, FnTypeExt}; use std::fmt; use std::cell::RefCell; @@ -421,6 +423,15 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { ) -> &'ll Type { ty.scalar_pair_element_llvm_type(&self, index, immediate) } + fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { + ty.llvm_type(&self) + } + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.llvm_type(&self) + } + fn reg_backend_type(&self, ty: &Reg) -> &'ll Type { + ty.llvm_type(&self) + } } impl TypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index b2cd592f24b57..bda3dd793ad63 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -17,7 +17,7 @@ use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; use value::Value; -use interfaces::{BaseTypeMethods, DerivedTypeMethods}; +use interfaces::*; use std::fmt::Write; From da0cf8c9384ed13f0d0b02e764d604b7517bb89f Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 21 Sep 2018 11:53:04 +0200 Subject: [PATCH 48/76] Added 'll lifetime parameter to backend --- src/librustc_codegen_llvm/abi.rs | 4 +- src/librustc_codegen_llvm/base.rs | 58 +- src/librustc_codegen_llvm/callee.rs | 2 +- src/librustc_codegen_llvm/common.rs | 26 +- src/librustc_codegen_llvm/consts.rs | 2 +- src/librustc_codegen_llvm/context.rs | 4 +- src/librustc_codegen_llvm/declare.rs | 2 +- src/librustc_codegen_llvm/glue.rs | 4 +- src/librustc_codegen_llvm/interfaces/abi.rs | 2 +- src/librustc_codegen_llvm/interfaces/asm.rs | 4 +- .../interfaces/backend.rs | 4 +- .../interfaces/builder.rs | 616 +++++++++--------- .../interfaces/consts.rs | 2 +- .../interfaces/debuginfo.rs | 4 +- .../interfaces/declare.rs | 2 +- .../interfaces/intrinsic.rs | 6 +- 
src/librustc_codegen_llvm/interfaces/misc.rs | 2 +- src/librustc_codegen_llvm/interfaces/mod.rs | 6 +- .../interfaces/statics.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 20 +- src/librustc_codegen_llvm/meth.rs | 8 +- src/librustc_codegen_llvm/mir/mod.rs | 18 +- src/librustc_codegen_llvm/mir/operand.rs | 42 +- src/librustc_codegen_llvm/mir/place.rs | 30 +- src/librustc_codegen_llvm/mir/rvalue.rs | 26 +- src/librustc_codegen_llvm/type_.rs | 4 +- 26 files changed, 450 insertions(+), 450 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 34c008e126957..253979cd6be41 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -284,7 +284,7 @@ impl<'a, 'll: 'a, 'tcx: 'll> ArgTypeMethods<'a, 'll, 'tcx> for Builder<'a, 'll, fn store_fn_arg( &self, ty: &ArgType<'tcx, Ty<'tcx>>, - idx: &mut usize, dst: PlaceRef<'tcx, ::Value> + idx: &mut usize, dst: PlaceRef<'tcx, >::Value> ) { ty.store_fn_arg(&self, idx, dst) } @@ -797,7 +797,7 @@ impl AbiBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn apply_attrs_callsite( &self, ty: &FnType<'tcx, Ty<'tcx>>, - callsite: ::Value + callsite: >::Value ) { ty.apply_attrs_callsite(&self, callsite) } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index be9350503e21f..9d75587d74008 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -156,12 +156,12 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, - lhs: ::Value, - rhs: ::Value, + lhs: >::Value, + rhs: >::Value, t: Ty<'tcx>, - ret_ty: ::Type, + ret_ty: >::Type, op: hir::BinOpKind -) -> ::Value { +) -> >::Value { let signed = match t.sty { ty::Float(_) => { let cmp = bin_op_to_fcmp_predicate(op); @@ -218,10 +218,10 @@ pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + 
CodegenMethods<'ll, 'tcx>>( /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - src: ::Value, + src: >::Value, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> -) -> (::Value, ::Value) where +) -> (>::Value, >::Value) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); @@ -275,8 +275,8 @@ pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx> /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - src: PlaceRef<'tcx, ::Value>, - dst: PlaceRef<'tcx, ::Value> + src: PlaceRef<'tcx, >::Value>, + dst: PlaceRef<'tcx, >::Value> ) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let src_ty = src.layout.ty; @@ -336,28 +336,28 @@ pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, op: hir::BinOpKind, - lhs: ::Value, - rhs: ::Value -) -> ::Value { + lhs: >::Value, + rhs: >::Value +) -> >::Value { cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) } fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, op: hir::BinOpKind, - lhs: ::Value, - rhs: ::Value, + lhs: >::Value, + rhs: >::Value, trunc: F, zext: G -) -> ::Value +) -> >::Value where F: FnOnce( - ::Value, - ::Type - ) -> ::Value, + >::Value, + >::Type + ) -> >::Value, G: FnOnce( - ::Value, - ::Type - ) -> ::Value + >::Value, + >::Type + ) -> >::Value { // Shifts may have any size int on the rhs if op.is_shift() { @@ -396,7 +396,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( bx: &Bx, - val: 
::Value + val: >::Value ) { let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); bx.call(assume_intrinsic, &[val], None); @@ -404,8 +404,8 @@ pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( bx: &Bx, - val: ::Value -) -> ::Value { + val: >::Value +) -> >::Value { if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { @@ -415,9 +415,9 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx> pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, - val: ::Value, + val: >::Value, layout: layout::TyLayout, -) -> ::Value { +) -> >::Value { if let layout::Abi::Scalar(ref scalar) = layout.abi { return to_immediate_scalar(bx, val, scalar); } @@ -426,9 +426,9 @@ pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 't pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, - val: ::Value, + val: >::Value, scalar: &layout::Scalar, -) -> ::Value { +) -> >::Value { if scalar.is_bool() { return bx.trunc(val, bx.cx().type_i1()); } @@ -437,8 +437,8 @@ pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Builder : BuilderMethods<'a, pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( bx: &Builder, - dst: ::Value, - src: ::Value, + dst: >::Value, + src: >::Value, layout: TyLayout<'tcx>, align: Align, flags: MemFlags, diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index d0d0727a85959..63bd23e90255b 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -205,7 +205,7 @@ pub fn get_fn( } pub fn resolve_and_get_fn<'ll, 'tcx: 'll, - Cx : Backend + MiscMethods<'tcx> + TypeMethods<'ll, 'tcx> + Cx : Backend<'ll> + MiscMethods<'ll, 'tcx> + TypeMethods<'ll, 'tcx> >( cx: &Cx, def_id: DefId, 
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 7cbfa1efd1701..386ef00c120b6 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -217,14 +217,14 @@ impl<'ll, V : CodegenObject> Funclet<'ll, V> { } } -impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> { +impl Backend<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; type Type = &'ll Type; type Context = &'ll llvm::Context; } -impl<'ll, 'tcx : 'll> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl<'ll, 'tcx : 'll> ConstMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { // LLVM constant constructors. fn const_null(&self, t: &'ll Type) -> &'ll Value { @@ -547,9 +547,9 @@ pub fn langcall(tcx: TyCtxt, pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - lhs: ::Value, - rhs: ::Value -) -> ::Value { + lhs: >::Value, + rhs: >::Value +) -> >::Value { let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); @@ -559,9 +559,9 @@ pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, lhs_t: Ty<'tcx>, - lhs: ::Value, - rhs: ::Value -) -> ::Value { + lhs: >::Value, + rhs: >::Value +) -> >::Value { let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); @@ -575,18 +575,18 @@ pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - rhs: ::Value -) -> ::Value { + rhs: >::Value +) -> >::Value { let rhs_llty = bx.cx().val_ty(rhs); bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) } pub 
fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - llty: ::Type, - mask_llty: ::Type, + llty: >::Type, + mask_llty: >::Type, invert: bool -) -> ::Value { +) -> >::Value { let kind = bx.cx().type_kind(llty); match kind { TypeKind::Integer => { diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 26d1b86b03c5e..bd29e887cabca 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -116,7 +116,7 @@ pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { } } -impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl StaticMethods<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { fn static_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { ptrcast(val, ty) diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 3a1ffe03616d5..2cbbf6d90b63e 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -316,7 +316,7 @@ impl<'a, 'tcx, Value : Eq+Hash> CodegenCx<'a, 'tcx, Value> { } } -impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn vtables(&self) -> &RefCell, Option>), &'ll Value>> { @@ -422,7 +422,7 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} -impl IntrinsicDeclarationMethods for CodegenCx<'b, 'tcx, &'b Value> { +impl IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> { fn get_intrinsic(&self, key: &str) -> &'b Value { if let Some(v) = self.intrinsics.borrow().get(key).cloned() { return v; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index dd76295df5931..ccde179f05310 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -96,7 +96,7 @@ fn declare_raw_fn( llfn } -impl 
DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { /// Declare a global value. /// diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index ea2be097bd782..49db1c7694861 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -25,8 +25,8 @@ pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, >( bx: &Bx, t: Ty<'tcx>, - info: Option<::Value> -) -> (::Value, ::Value) where + info: Option<>::Value> +) -> (>::Value, >::Value) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("calculate size of DST: {}; with lost info: {:?}", diff --git a/src/librustc_codegen_llvm/interfaces/abi.rs b/src/librustc_codegen_llvm/interfaces/abi.rs index b41b20f36c211..48827455b7b4b 100644 --- a/src/librustc_codegen_llvm/interfaces/abi.rs +++ b/src/librustc_codegen_llvm/interfaces/abi.rs @@ -27,6 +27,6 @@ pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> fn apply_attrs_callsite( &self, ty: &FnType<'tcx, Ty<'tcx>>, - callsite: ::Value + callsite: >::Value ); } diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs index 613f36eba5485..bdd822a43ba8e 100644 --- a/src/librustc_codegen_llvm/interfaces/asm.rs +++ b/src/librustc_codegen_llvm/interfaces/asm.rs @@ -17,8 +17,8 @@ pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ fn codegen_inline_asm( &self, ia: &InlineAsm, - outputs: Vec::Value>>, - inputs: Vec<::Value> + outputs: Vec>::Value>>, + inputs: Vec<>::Value> ); } diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 023acbbe0b505..debce8560395e 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -10,8 +10,8 @@ use super::CodegenObject; -pub trait Backend { - type Value : 
CodegenObject; +pub trait Backend<'ll> { + type Value : 'll + CodegenObject; type BasicBlock : Copy; type Type : CodegenObject; type Context; diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 177ee390f1cfd..2b55beeb1dc8f 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -38,590 +38,590 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + { fn new_block<'b>( cx: &'a Self::CodegenCx, - llfn: ::Value, + llfn: >::Value, name: &'b str ) -> Self; fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; fn cx(&self) -> &'a Self::CodegenCx; fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn llfn(&self) -> ::Value; - fn llbb(&self) -> ::BasicBlock; + fn llfn(&self) -> >::Value; + fn llbb(&self) -> >::BasicBlock; fn count_insn(&self, category: &str); - fn set_value_name(&self, value: ::Value, name: &str); - fn position_at_end(&self, llbb: ::BasicBlock); - fn position_at_start(&self, llbb: ::BasicBlock); + fn set_value_name(&self, value: >::Value, name: &str); + fn position_at_end(&self, llbb: >::BasicBlock); + fn position_at_start(&self, llbb: >::BasicBlock); fn ret_void(&self); - fn ret(&self, v: ::Value); - fn br(&self, dest: ::BasicBlock); + fn ret(&self, v: >::Value); + fn br(&self, dest: >::BasicBlock); fn cond_br( &self, - cond: ::Value, - then_llbb: ::BasicBlock, - else_llbb: ::BasicBlock, + cond: >::Value, + then_llbb: >::BasicBlock, + else_llbb: >::BasicBlock, ); fn switch( &self, - v: ::Value, - else_llbb: ::BasicBlock, + v: >::Value, + else_llbb: >::BasicBlock, num_cases: usize, - ) -> ::Value; + ) -> >::Value; fn invoke( &self, - llfn: ::Value, - args: &[::Value], - then: ::BasicBlock, - catch: ::BasicBlock, - bundle: Option<&OperandBundleDef<'ll, ::Value>> - ) -> ::Value; + llfn: >::Value, + args: &[>::Value], + then: >::BasicBlock, + catch: >::BasicBlock, + 
bundle: Option<&OperandBundleDef<'ll, >::Value>> + ) -> >::Value; fn unreachable(&self); fn add( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fadd( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fadd_fast( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn sub( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fsub( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fsub_fast( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn mul( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fmul( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fmul_fast( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn udiv( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn exactudiv( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn sdiv( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn exactsdiv( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fdiv( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn fdiv_fast( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn urem( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn srem( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: 
>::Value, + rhs: >::Value + ) -> >::Value; fn frem( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn frem_fast( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn shl( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn lshr( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn ashr( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn and( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn or( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn xor( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; - fn neg(&self, v: ::Value) -> ::Value; - fn fneg(&self, v: ::Value) -> ::Value; - fn not(&self, v: ::Value) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn neg(&self, v: >::Value) -> >::Value; + fn fneg(&self, v: >::Value) -> >::Value; + fn not(&self, v: >::Value) -> >::Value; fn alloca( &self, - ty: ::Type, + ty: >::Type, name: &str, align: Align - ) -> ::Value; + ) -> >::Value; fn dynamic_alloca( &self, - ty: ::Type, + ty: >::Type, name: &str, align: Align - ) -> ::Value; + ) -> >::Value; fn array_alloca( &self, - ty: ::Type, - len: ::Value, + ty: >::Type, + len: >::Value, name: &str, align: Align - ) -> ::Value; + ) -> >::Value; fn load( &self, - ptr: ::Value, + ptr: >::Value, align: Align - ) -> ::Value; + ) -> >::Value; fn volatile_load( &self, - ptr: ::Value - ) -> ::Value; + ptr: >::Value + ) -> >::Value; fn atomic_load( &self, - ptr: ::Value, + ptr: >::Value, order: AtomicOrdering, align: Align - ) -> ::Value; + ) -> >::Value; fn load_ref( &self, - &PlaceRef<'tcx,::Value> - ) -> OperandRef<'tcx, ::Value>; + &PlaceRef<'tcx,>::Value> + ) -> OperandRef<'tcx, 
>::Value>; - fn range_metadata(&self, load: ::Value, range: Range); - fn nonnull_metadata(&self, load: ::Value); + fn range_metadata(&self, load: >::Value, range: Range); + fn nonnull_metadata(&self, load: >::Value); fn store( &self, - val: ::Value, - ptr: ::Value, + val: >::Value, + ptr: >::Value, align: Align - ) -> ::Value; + ) -> >::Value; fn atomic_store( &self, - val: ::Value, - ptr: ::Value, + val: >::Value, + ptr: >::Value, order: AtomicOrdering, align: Align ); fn store_with_flags( &self, - val: ::Value, - ptr: ::Value, + val: >::Value, + ptr: >::Value, align: Align, flags: MemFlags, - ) -> ::Value; + ) -> >::Value; fn gep( &self, - ptr: ::Value, - indices: &[::Value] - ) -> ::Value; + ptr: >::Value, + indices: &[>::Value] + ) -> >::Value; fn inbounds_gep( &self, - ptr: ::Value, - indices: &[::Value] - ) -> ::Value; + ptr: >::Value, + indices: &[>::Value] + ) -> >::Value; fn struct_gep( &self, - ptr: ::Value, + ptr: >::Value, idx: u64 - ) -> ::Value; + ) -> >::Value; fn trunc( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn sext( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn fptoui( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn fptosi( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn uitofp( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn sitofp( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn fptrunc( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn fpext( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn ptrtoint( &self, - val: ::Value, - 
dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn inttoptr( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn bitcast( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn intcast( &self, - val: ::Value, - dest_ty: ::Type, is_signed: bool - ) -> ::Value; + val: >::Value, + dest_ty: >::Type, is_signed: bool + ) -> >::Value; fn pointercast( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; fn icmp( &self, op: IntPredicate, - lhs: ::Value, rhs: ::Value - ) -> ::Value; + lhs: >::Value, rhs: >::Value + ) -> >::Value; fn fcmp( &self, op: RealPredicate, - lhs: ::Value, rhs: ::Value - ) -> ::Value; + lhs: >::Value, rhs: >::Value + ) -> >::Value; fn empty_phi( &self, - ty: ::Type) -> ::Value; + ty: >::Type) -> >::Value; fn phi( &self, - ty: ::Type, - vals: &[::Value], - bbs: &[::BasicBlock] - ) -> ::Value; + ty: >::Type, + vals: &[>::Value], + bbs: &[>::BasicBlock] + ) -> >::Value; fn inline_asm_call( &self, asm: *const c_char, cons: *const c_char, - inputs: &[::Value], - output: ::Type, + inputs: &[>::Value], + output: >::Type, volatile: bool, alignstack: bool, dia: AsmDialect - ) -> ::Value; + ) -> >::Value; fn minnum( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn maxnum( &self, - lhs: ::Value, - rhs: ::Value - ) -> ::Value; + lhs: >::Value, + rhs: >::Value + ) -> >::Value; fn select( - &self, cond: ::Value, - then_val: ::Value, - else_val: ::Value, - ) -> ::Value; + &self, cond: >::Value, + then_val: >::Value, + else_val: >::Value, + ) -> >::Value; fn va_arg( &self, - list: ::Value, - ty: ::Type - ) -> ::Value; + list: >::Value, + ty: >::Type + ) -> >::Value; fn extract_element(&self, - vec: ::Value, - idx: ::Value - ) -> ::Value; + vec: >::Value, + idx: >::Value + ) -> >::Value; fn 
insert_element( - &self, vec: ::Value, - elt: ::Value, - idx: ::Value, - ) -> ::Value; + &self, vec: >::Value, + elt: >::Value, + idx: >::Value, + ) -> >::Value; fn shuffle_vector( &self, - v1: ::Value, - v2: ::Value, - mask: ::Value - ) -> ::Value; + v1: >::Value, + v2: >::Value, + mask: >::Value + ) -> >::Value; fn vector_splat( &self, num_elts: usize, - elt: ::Value - ) -> ::Value; + elt: >::Value + ) -> >::Value; fn vector_reduce_fadd_fast( &self, - acc: ::Value, - src: ::Value - ) -> ::Value; + acc: >::Value, + src: >::Value + ) -> >::Value; fn vector_reduce_fmul_fast( &self, - acc: ::Value, - src: ::Value - ) -> ::Value; + acc: >::Value, + src: >::Value + ) -> >::Value; fn vector_reduce_add( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_mul( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_and( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_or( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_xor( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_fmin( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_fmax( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_fmin_fast( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_fmax_fast( &self, - src: ::Value - ) -> ::Value; + src: >::Value + ) -> >::Value; fn vector_reduce_min( &self, - src: ::Value, + src: >::Value, is_signed: bool - ) -> ::Value; + ) -> >::Value; fn vector_reduce_max( &self, - src: ::Value, + src: >::Value, is_signed: bool - ) -> ::Value; + ) -> >::Value; fn extract_value( &self, - agg_val: ::Value, + agg_val: >::Value, idx: u64 - ) -> ::Value; + ) -> >::Value; fn insert_value( &self, - agg_val: ::Value, - elt: ::Value, + agg_val: >::Value, + elt: >::Value, idx: u64 - ) -> ::Value; + ) -> 
>::Value; fn landing_pad( &self, - ty: ::Type, - pers_fn: ::Value, + ty: >::Type, + pers_fn: >::Value, num_clauses: usize - ) -> ::Value; + ) -> >::Value; fn add_clause( &self, - landing_pad: ::Value, - clause: ::Value + landing_pad: >::Value, + clause: >::Value ); fn set_cleanup( &self, - landing_pad: ::Value + landing_pad: >::Value ); fn resume( &self, - exn: ::Value - ) -> ::Value; + exn: >::Value + ) -> >::Value; fn cleanup_pad( &self, - parent: Option<::Value>, - args: &[::Value] - ) -> ::Value; + parent: Option<>::Value>, + args: &[>::Value] + ) -> >::Value; fn cleanup_ret( - &self, cleanup: ::Value, - unwind: Option<::BasicBlock>, - ) -> ::Value; + &self, cleanup: >::Value, + unwind: Option<>::BasicBlock>, + ) -> >::Value; fn catch_pad( &self, - parent: ::Value, - args: &[::Value] - ) -> ::Value; + parent: >::Value, + args: &[>::Value] + ) -> >::Value; fn catch_ret( &self, - pad: ::Value, - unwind: ::BasicBlock - ) -> ::Value; + pad: >::Value, + unwind: >::BasicBlock + ) -> >::Value; fn catch_switch( &self, - parent: Option<::Value>, - unwind: Option<::BasicBlock>, + parent: Option<>::Value>, + unwind: Option<>::BasicBlock>, num_handlers: usize, - ) -> ::Value; + ) -> >::Value; fn add_handler( &self, - catch_switch: ::Value, - handler: ::BasicBlock + catch_switch: >::Value, + handler: >::BasicBlock ); - fn set_personality_fn(&self, personality: ::Value); + fn set_personality_fn(&self, personality: >::Value); fn atomic_cmpxchg( &self, - dst: ::Value, - cmp: ::Value, - src: ::Value, + dst: >::Value, + cmp: >::Value, + src: >::Value, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool, - ) -> ::Value; + ) -> >::Value; fn atomic_rmw( &self, op: AtomicRmwBinOp, - dst: ::Value, - src: ::Value, + dst: >::Value, + src: >::Value, order: AtomicOrdering, - ) -> ::Value; + ) -> >::Value; fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); fn add_case( &self, - s: ::Value, - on_val: ::Value, - dest: ::BasicBlock + s: >::Value, + 
on_val: >::Value, + dest: >::BasicBlock ); fn add_incoming_to_phi( &self, - phi: ::Value, - val: ::Value, - bb: ::BasicBlock + phi: >::Value, + val: >::Value, + bb: >::BasicBlock ); - fn set_invariant_load(&self, load: ::Value); + fn set_invariant_load(&self, load: >::Value); fn check_store( &self, - val: ::Value, - ptr: ::Value - ) -> ::Value; + val: >::Value, + ptr: >::Value + ) -> >::Value; fn check_call<'b>( &self, typ: &str, - llfn: ::Value, - args: &'b [::Value] - ) -> Cow<'b, [::Value]> - where [::Value] : ToOwned; - fn lifetime_start(&self, ptr: ::Value, size: Size); - fn lifetime_end(&self, ptr: ::Value, size: Size); + llfn: >::Value, + args: &'b [>::Value] + ) -> Cow<'b, [>::Value]> + where [>::Value] : ToOwned; + fn lifetime_start(&self, ptr: >::Value, size: Size); + fn lifetime_end(&self, ptr: >::Value, size: Size); fn call_lifetime_intrinsic( &self, intrinsic: &str, - ptr: ::Value, size: Size + ptr: >::Value, size: Size ); fn call( &self, - llfn: ::Value, - args: &[::Value], - bundle: Option<&OperandBundleDef<'ll, ::Value>> - ) -> ::Value; + llfn: >::Value, + args: &[>::Value], + bundle: Option<&OperandBundleDef<'ll, >::Value>> + ) -> >::Value; fn call_memcpy( &self, - dst: ::Value, - src: ::Value, - n_bytes: ::Value, + dst: >::Value, + src: >::Value, + n_bytes: >::Value, align: Align, flags: MemFlags, ); fn call_memset( &self, - ptr: ::Value, - fill_byte: ::Value, - size: ::Value, - align: ::Value, + ptr: >::Value, + fill_byte: >::Value, + size: >::Value, + align: >::Value, volatile: bool, - ) -> ::Value; + ) -> >::Value; fn zext( &self, - val: ::Value, - dest_ty: ::Type - ) -> ::Value; + val: >::Value, + dest_ty: >::Type + ) -> >::Value; - fn delete_basic_block(&self, bb: ::BasicBlock); - fn do_not_inline(&self, llret: ::Value); + fn delete_basic_block(&self, bb: >::BasicBlock); + fn do_not_inline(&self, llret: >::Value); } diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_llvm/interfaces/consts.rs index 
f9f58afa56bac..9835e5c215375 100644 --- a/src/librustc_codegen_llvm/interfaces/consts.rs +++ b/src/librustc_codegen_llvm/interfaces/consts.rs @@ -15,7 +15,7 @@ use rustc::mir::interpret::Scalar; use rustc::mir::interpret::Allocation; use mir::place::PlaceRef; -pub trait ConstMethods<'tcx> : Backend { +pub trait ConstMethods<'ll, 'tcx: 'll> : Backend<'ll> { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; fn const_undef(&self, t: Self::Type) -> Self::Value; diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs index b72deba0040a0..e850c41500136 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -19,7 +19,7 @@ use syntax_pos; use rustc::hir::def_id::CrateNum; use syntax::ast::Name; -pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend { +pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { type DIScope : 'll + Copy; fn create_vtable_metadata( @@ -54,7 +54,7 @@ pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, variable_name: Name, variable_type: Ty<'tcx>, scope_metadata: >::DIScope, - variable_access: VariableAccess<'_, ::Value>, + variable_access: VariableAccess<'_, >::Value>, variable_kind: VariableKind, span: syntax_pos::Span, ); diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_llvm/interfaces/declare.rs index 3e02ecabca38d..42d74febc2ff9 100644 --- a/src/librustc_codegen_llvm/interfaces/declare.rs +++ b/src/librustc_codegen_llvm/interfaces/declare.rs @@ -11,7 +11,7 @@ use rustc::ty::Ty; use super::backend::Backend; -pub trait DeclareMethods<'tcx> : Backend{ +pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn declare_global( &self, name: &str, ty: Self::Type diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs index 583cb8151a551..5de37c878a02e 100644 --- 
a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -20,13 +20,13 @@ pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tc &self, callee_ty: Ty<'tcx>, fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'tcx, ::Value>], - llresult: ::Value, + args: &[OperandRef<'tcx, >::Value>], + llresult: >::Value, span: Span, ); } -pub trait IntrinsicDeclarationMethods : Backend { +pub trait IntrinsicDeclarationMethods<'ll> : Backend<'ll> { fn get_intrinsic(&self, key: &str) -> Self::Value; fn declare_intrinsic( &self, diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index a00b48318566a..22a277fbd98b6 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -15,7 +15,7 @@ use super::backend::Backend; use rustc::session::Session; use libc::c_uint; -pub trait MiscMethods<'tcx> : Backend { +pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn vtables(&self) -> &RefCell, Option>), Self::Value>>; fn check_overflow(&self) -> bool; diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 8e8580d11d032..bba5243e7d006 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -36,8 +36,8 @@ pub use self::asm::{AsmMethods, AsmBuilderMethods}; use std::fmt; pub trait CodegenMethods<'ll, 'tcx: 'll> : - Backend + TypeMethods<'ll, 'tcx> + MiscMethods<'tcx> + ConstMethods<'tcx> + - StaticMethods<'tcx> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + - IntrinsicDeclarationMethods + DeclareMethods<'tcx> + AsmMethods {} + Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods {} pub trait CodegenObject : Copy + 
PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/interfaces/statics.rs b/src/librustc_codegen_llvm/interfaces/statics.rs index a20b814749172..782bf67cb063a 100644 --- a/src/librustc_codegen_llvm/interfaces/statics.rs +++ b/src/librustc_codegen_llvm/interfaces/statics.rs @@ -12,7 +12,7 @@ use rustc::ty::layout::Align; use rustc::hir::def_id::DefId; use super::backend::Backend; -pub trait StaticMethods<'tcx> : Backend { +pub trait StaticMethods<'ll> : Backend<'ll> { fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; fn static_addr_of_mut( diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index e556465e40be0..45c84f520f19e 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -21,7 +21,7 @@ use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; use mir::place::PlaceRef; -pub trait BaseTypeMethods<'a, 'tcx: 'a> : Backend { +pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn type_void(&self) -> Self::Type; fn type_metadata(&self) -> Self::Type; fn type_i1(&self) -> Self::Type; @@ -52,10 +52,10 @@ pub trait BaseTypeMethods<'a, 'tcx: 'a> : Backend { fn val_ty(&self, v: Self::Value) -> Self::Type; fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; - fn tcx(&self) -> &TyCtxt<'a, 'tcx, 'tcx>; + fn tcx(&self) -> &TyCtxt<'ll, 'tcx, 'tcx>; } -pub trait DerivedTypeMethods<'tcx> : Backend { +pub trait DerivedTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn type_bool(&self) -> Self::Type; fn type_char(&self) -> Self::Type; fn type_i8p(&self) -> Self::Type; @@ -87,7 +87,7 @@ pub trait DerivedTypeMethods<'tcx> : Backend { fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool; } -pub trait LayoutTypeMethods<'tcx> : Backend { +pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; 
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; @@ -106,16 +106,16 @@ pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn store_fn_arg( &self, ty: &ArgType<'tcx, Ty<'tcx>>, - idx: &mut usize, dst: PlaceRef<'tcx, ::Value> + idx: &mut usize, dst: PlaceRef<'tcx, >::Value> ); fn store_arg_ty( &self, ty: &ArgType<'tcx, Ty<'tcx>>, - val: ::Value, - dst: PlaceRef<'tcx, ::Value> + val: >::Value, + dst: PlaceRef<'tcx, >::Value> ); - fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> ::Type; + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> >::Type; } -pub trait TypeMethods<'a, 'tcx: 'a> : - BaseTypeMethods<'a, 'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} +pub trait TypeMethods<'ll, 'tcx: 'll> : + BaseTypeMethods<'ll, 'tcx> + DerivedTypeMethods<'ll, 'tcx> + LayoutTypeMethods<'ll, 'tcx> {} diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs index 7f638ddef0b53..c52082fe2cff3 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -32,9 +32,9 @@ impl<'a, 'tcx> VirtualIndex { pub fn get_fn>( self, bx: &Bx, - llvtable: ::Value, + llvtable: >::Value, fn_ty: &FnType<'tcx, Ty<'tcx>> - ) -> ::Value { + ) -> >::Value { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); @@ -56,8 +56,8 @@ impl<'a, 'tcx> VirtualIndex { pub fn get_usize>( self, bx: &Bx, - llvtable: ::Value - ) -> ::Value { + llvtable: >::Value + ) -> >::Value { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 9f4f2f603aca9..51ae62ceafdc0 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -70,7 +70,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx /// When targeting MSVC, this stores the cleanup info for each funclet /// BB. Thisrustup component add rustfmt-preview is initialized as we compute the funclets' /// head block in RPO. - funclets: &'a IndexVec>>, + funclets: &'ll IndexVec>>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. @@ -196,11 +196,11 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl<'ll, 'tcx: 'll, V : CodegenObject> LocalRef<'tcx, V> { +impl<'ll, 'tcx: 'll, V : 'll + CodegenObject> LocalRef<'tcx, V> { fn new_operand>( cx: &Cx, layout: TyLayout<'tcx> - ) -> LocalRef<'tcx, V> where Cx: Backend { + ) -> LocalRef<'tcx, V> where Cx: Backend<'ll, Value=V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -216,7 +216,7 @@ impl<'ll, 'tcx: 'll, V : CodegenObject> LocalRef<'tcx, V> { pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( cx: &'a Bx::CodegenCx, - llfn: ::Value, + llfn: >::Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, @@ -235,7 +235,7 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. 
let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); - let block_bxs: IndexVec::BasicBlock> = + let block_bxs: IndexVec>::BasicBlock> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK && !reentrant_start_block { bx.llbb() @@ -375,9 +375,9 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( mir: &'a Mir<'tcx>, bx: &Bx, cleanup_kinds: &IndexVec, - block_bxs: &IndexVec::BasicBlock>) - -> (IndexVec::BasicBlock>>, - IndexVec::Value>>>) + block_bxs: &IndexVec>::BasicBlock>) + -> (IndexVec>::BasicBlock>>, + IndexVec>::Value>>>) { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { @@ -448,7 +448,7 @@ fn arg_local_refs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( debuginfo::MirDebugScope<>::DIScope> >, memory_locals: &BitSet, -) -> Vec::Value>> +) -> Vec>::Value>> where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { let mir = fx.mir; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index df441b6dd536b..8dd7015a12d21 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -65,11 +65,11 @@ impl fmt::Debug for OperandRef<'tcx, V> { } } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { pub fn new_zst>( cx: &Cx, layout: TyLayout<'tcx> - ) -> OperandRef<'tcx, V> where Cx : Backend { + ) -> OperandRef<'tcx, V> where Cx : Backend<'ll, Value = V> { assert!(layout.is_zst()); OperandRef { val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(&layout))), @@ -81,7 +81,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { bx: &Bx, val: &'tcx ty::Const<'tcx> ) -> Result, Lrc>> where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value = V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = 
TyLayout<'tcx>> + HasTyCtxt<'tcx> { let layout = bx.cx().layout_of(val.ty); @@ -135,7 +135,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { } } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. pub fn immediate(self) -> V { @@ -149,7 +149,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { self, cx: &'a Cx ) -> PlaceRef<'tcx, V> where - Cx: Backend, + Cx: Backend<'ll, Value=V>, &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let projected_ty = self.layout.ty.builtin_deref(true) @@ -173,7 +173,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { pub fn immediate_or_packed_pair>( self, bx: &Bx - ) -> V where Bx::CodegenCx : Backend { + ) -> V where Bx::CodegenCx : Backend<'ll, Value=V> { if let OperandValue::Pair(a, b) = self.val { let llty = bx.cx().backend_type(&self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", @@ -191,10 +191,10 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. 
pub fn from_immediate_or_packed_pair>( bx: &Bx, - llval: ::Value, + llval: >::Value, layout: TyLayout<'tcx> - ) -> OperandRef<'tcx, ::Value> - where Bx::CodegenCx : Backend + ) -> OperandRef<'tcx, >::Value> + where Bx::CodegenCx : Backend<'ll, Value=V> { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", @@ -213,8 +213,8 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { pub fn extract_field>( &self, bx: &Bx, i: usize - ) -> OperandRef<'tcx, ::Value> where - Bx::CodegenCx : Backend, + ) -> OperandRef<'tcx, >::Value> where + Bx::CodegenCx : Backend<'ll, Value=V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let field = self.layout.field(bx.cx(), i); @@ -274,12 +274,12 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> OperandRef<'tcx, V> { } } -impl OperandValue { - pub fn store<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { + pub fn store>( self, bx: &Bx, - dest: PlaceRef<'tcx, ::Value> - ) where Bx::CodegenCx : Backend { + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::empty()); } } @@ -313,13 +313,13 @@ impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> { } } -impl OperandValue { - fn store_with_flags<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { + fn store_with_flags>( self, bx: &Bx, - dest: PlaceRef<'tcx, ::Value>, + dest: PlaceRef<'tcx, >::Value>, flags: MemFlags, - ) where Bx::CodegenCx : Backend { + ) where Bx::CodegenCx : Backend<'ll, Value = V> { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. 
@@ -347,12 +347,12 @@ impl OperandValue { } } } - pub fn store_unsized<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + pub fn store_unsized>( self, bx: &Bx, indirect_dest: PlaceRef<'tcx, V> ) where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value = V>, &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index c11fc9bcffd53..c38904712532d 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -36,7 +36,7 @@ pub struct PlaceRef<'tcx, V> { pub align: Align, } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized( llval: V, layout: TyLayout<'tcx>, @@ -55,7 +55,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { bx: &Bx, layout: TyLayout<'tcx>, name: &str - ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend { + ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V> { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); let tmp = bx.alloca(bx.cx().backend_type(&layout), name, layout.align); @@ -69,7 +69,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { name: &str ) -> PlaceRef<'tcx, V> where &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx>, - Bx::CodegenCx : Backend + Bx::CodegenCx : Backend<'ll, Value=V> { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); @@ -81,7 +81,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { pub fn len>( &self, cx: &Cx - ) -> V where Cx : Backend { + ) -> V where Cx : Backend<'ll, Value=V> { if let 
layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); @@ -96,14 +96,14 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. pub fn project_field>( self, bx: &Bx, ix: usize - ) -> PlaceRef<'tcx, ::Value> + ) -> PlaceRef<'tcx, >::Value> where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value = V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let cx = bx.cx(); @@ -214,7 +214,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { bx: &Bx, cast_to: Ty<'tcx> ) -> V where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value = V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let cast_to = bx.cx().immediate_backend_type(&bx.cx().layout_of(cast_to)); @@ -285,7 +285,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { bx: &Bx, variant_index: usize ) where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value=V>, &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { @@ -345,13 +345,13 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { } } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn project_index>( &self, bx: &Bx, llindex: V ) -> PlaceRef<'tcx, V> where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value=V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { PlaceRef { @@ -367,7 +367,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { bx: &Bx, variant_index: usize ) -> 
PlaceRef<'tcx, V> where - Bx::CodegenCx : Backend, + Bx::CodegenCx : Backend<'ll, Value=V>, &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let mut downcast = *self; @@ -381,15 +381,15 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { } } -impl<'a, 'll: 'a, 'tcx: 'll, V : CodegenObject> PlaceRef<'tcx, V> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn storage_live>(&self, bx: &Bx) - where Bx::CodegenCx : Backend + where Bx::CodegenCx : Backend<'ll, Value = V> { bx.lifetime_start(self.llval, self.layout.size); } pub fn storage_dead>(&self, bx: &Bx) - where Bx::CodegenCx : Backend + where Bx::CodegenCx : Backend<'ll, Value = V> { bx.lifetime_end(self.llval, self.layout.size); } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 25228a09907f6..15ab6367727cf 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -759,7 +759,7 @@ fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 't oop: OverflowOp, bx: &Bx, ty: Ty -) -> ::Value { +) -> >::Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -827,10 +827,10 @@ fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 't fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, signed: bool, - x: ::Value, - int_ty: ::Type, - float_ty: ::Type -) -> ::Value { + x: >::Value, + int_ty: >::Type, + float_ty: >::Type +) -> >::Value { // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. 
@@ -861,10 +861,10 @@ fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, signed: bool, - x: ::Value, - float_ty: ::Type, - int_ty: ::Type -) -> ::Value { + x: >::Value, + float_ty: >::Type, + int_ty: >::Type +) -> >::Value { let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { @@ -893,7 +893,7 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. - let int_max = |signed: bool, int_ty: ::Type| -> u128 { + let int_max = |signed: bool, int_ty: >::Type| -> u128 { let shift_amount = 128 - bx.cx().int_width(int_ty); if signed { i128::MAX as u128 >> shift_amount @@ -901,7 +901,7 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( u128::MAX >> shift_amount } }; - let int_min = |signed: bool, int_ty: ::Type| -> i128 { + let int_min = |signed: bool, int_ty: >::Type| -> i128 { if signed { i128::MIN >> (128 - bx.cx().int_width(int_ty)) } else { @@ -910,7 +910,7 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( }; let compute_clamp_bounds_single = - |signed: bool, int_ty: ::Type| -> (u128, u128) { + |signed: bool, int_ty: >::Type| -> (u128, u128) { let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero); @@ -918,7 +918,7 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( (rounded_min.value.to_bits(), rounded_max.value.to_bits()) }; let compute_clamp_bounds_double = - |signed: bool, int_ty: ::Type| -> (u128, u128) { + 
|signed: bool, int_ty: >::Type| -> (u128, u128) { let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero); diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index d8be1ad686030..2e29650b12a08 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -280,7 +280,7 @@ impl Type { } } -impl DerivedTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl DerivedTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn type_bool(&self) -> &'ll Type { &self.type_i8() @@ -405,7 +405,7 @@ impl DerivedTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } -impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { +impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { ty.llvm_type(&self) } From 29f4d0e9557711b8c9cb3f1ddab6c0046694ef0a Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 21 Sep 2018 13:46:52 +0200 Subject: [PATCH 49/76] Added new lifetimes for Funclets in FunctionsCx -> compiles --- src/librustc_codegen_llvm/mir/analyze.rs | 16 ++++++++-------- src/librustc_codegen_llvm/mir/block.rs | 4 ++-- src/librustc_codegen_llvm/mir/constant.rs | 2 +- src/librustc_codegen_llvm/mir/mod.rs | 16 ++++++++-------- src/librustc_codegen_llvm/mir/operand.rs | 2 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 4 ++-- src/librustc_codegen_llvm/mir/statement.rs | 2 +- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs index 46e7d3edeb33f..107903ab1eaa2 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -23,8 +23,8 @@ use type_of::LayoutLlvmExt; use 
super::FunctionCx; use interfaces::*; -pub fn non_ssa_locals<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( - fx: &FunctionCx<'a, 'll, 'tcx, Cx> +pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( + fx: &FunctionCx<'a, 'f, 'll, 'tcx, Cx> ) -> BitSet where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { @@ -56,8 +56,8 @@ pub fn non_ssa_locals<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx> analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { - fx: &'mir FunctionCx<'a, 'll, 'tcx, Cx>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { + fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -65,10 +65,10 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods first_assignment: IndexVec } -impl> LocalAnalyzer<'mir, 'a, 'll, 'tcx, Cx> +impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, Cx>) -> Self { + fn new(fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -109,8 +109,8 @@ impl> LocalAnalyzer<'mir, 'a, 'll, 'tcx, Cx> } } -impl<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Visitor<'tcx> - for LocalAnalyzer<'mir, 'a, 'll, 'tcx, Cx> +impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Visitor<'tcx> + for LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn visit_assign(&mut self, diff --git a/src/librustc_codegen_llvm/mir/block.rs 
b/src/librustc_codegen_llvm/mir/block.rs index ca7bd793214db..5c3f21f807e87 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -31,8 +31,8 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> - FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_block>(&mut self, bb: mir::BasicBlock) diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 7d7b19b037df1..3cfa3aa0265d1 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -77,7 +77,7 @@ pub fn codegen_static_initializer( Ok((const_alloc_to_llvm(cx, alloc), alloc)) } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn fully_evaluate>( diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 51ae62ceafdc0..5076b80483ace 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -39,7 +39,7 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. 
-pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { +pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, @@ -70,7 +70,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx /// When targeting MSVC, this stores the cleanup info for each funclet /// BB. Thisrustup component add rustfmt-preview is initialized as we compute the funclets' /// head block in RPO. - funclets: &'ll IndexVec>>, + funclets: &'f IndexVec>>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. @@ -103,8 +103,8 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx param_substs: &'tcx Substs<'tcx>, } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> - FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> @@ -117,8 +117,8 @@ impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> - FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> { pub fn set_debug_loc>( &mut self, @@ -440,9 +440,9 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( /// Produce, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. 
-fn arg_local_refs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( +fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &Bx, - fx: &FunctionCx<'a, 'll, 'tcx, Bx::CodegenCx>, + fx: &FunctionCx<'a, 'f, 'll, 'tcx, Bx::CodegenCx>, scopes: &IndexVec< mir::SourceScope, debuginfo::MirDebugScope<>::DIScope> diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 8dd7015a12d21..ccd090799ec96 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -384,7 +384,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { } } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> { fn maybe_codegen_consume_direct>( &mut self, bx: &Bx, diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index c38904712532d..b5c4a471a310c 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -395,7 +395,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { } } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_place>( diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 15ab6367727cf..c420c55ffac7d 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -28,7 +28,7 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 
'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_rvalue>( @@ -723,7 +723,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, } } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs index 087ceee2cbd92..6b2f4713d7723 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -18,7 +18,7 @@ use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; use interfaces::*; use value::Value; -impl<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'll, 'tcx, Cx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_statement>( From 88a5dea77fe34c2e3e3c8d95d9ef0d1449d36a7c Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 21 Sep 2018 14:31:54 +0200 Subject: [PATCH 50/76] Tidy too long lines --- src/librustc_codegen_llvm/base.rs | 9 +++++++-- src/librustc_codegen_llvm/debuginfo/mod.rs | 8 ++++++-- src/librustc_codegen_llvm/interfaces/builder.rs | 15 ++++++++++++--- src/librustc_codegen_llvm/intrinsic.rs | 6 ++++-- src/librustc_codegen_llvm/mir/analyze.rs | 11 +++++++---- src/librustc_codegen_llvm/mir/constant.rs | 5 +++-- src/librustc_codegen_llvm/mir/mod.rs | 3 ++- src/librustc_codegen_llvm/mir/operand.rs | 4 +++- src/librustc_codegen_llvm/mir/place.rs | 5 +++-- 
src/librustc_codegen_llvm/mir/rvalue.rs | 10 ++++++---- src/librustc_codegen_llvm/mir/statement.rs | 5 +++-- 11 files changed, 56 insertions(+), 25 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 9d75587d74008..e97aca65c05e2 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -451,7 +451,10 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx> bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); } -pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>) { +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>( + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + instance: Instance<'tcx> +) { let _s = if cx.sess().codegen_stats() { let mut instance_name = String::new(); DefPathBasedNames::new(cx.tcx, true, true) @@ -476,7 +479,9 @@ pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>(cx: &'a CodegenCx<'ll, 'tcx, &'l cx.stats.borrow_mut().n_closures += 1; let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir::<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>>(cx, lldecl, &mir, instance, sig); + mir::codegen_mir::<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>>( + cx, lldecl, &mir, instance, sig + ); } pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 917884f7a1022..1df0b1f36b074 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -474,7 +474,9 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V name_to_append_suffix_to.push_str(","); } - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); + let actual_type = cx.tcx.normalize_erasing_regions( + ParamEnv::reveal_all(), actual_type + ); // Add actual type name to <...> clause of function 
name let actual_type_name = compute_debuginfo_type_name(cx, actual_type, @@ -488,7 +490,9 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V let names = get_parameter_names(cx, generics); substs.iter().zip(names).filter_map(|(kind, name)| { if let UnpackedKind::Type(ty) = kind.unpack() { - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type = cx.tcx.normalize_erasing_regions( + ParamEnv::reveal_all(), ty + ); let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); let name = SmallCStr::new(&name.as_str()); diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 2b55beeb1dc8f..91e0216606ea6 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -201,9 +201,18 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + lhs: >::Value, rhs: >::Value ) -> >::Value; - fn neg(&self, v: >::Value) -> >::Value; - fn fneg(&self, v: >::Value) -> >::Value; - fn not(&self, v: >::Value) -> >::Value; + fn neg( + &self, + v: >::Value + ) -> >::Value; + fn fneg( + &self, + v: >::Value + ) -> >::Value; + fn not( + &self, + v: >::Value + ) -> >::Value; fn alloca( &self, diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 1a7247a0bb593..d919a2deb1e86 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -676,8 +676,10 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let val = match intr.definition { intrinsics::IntrinsicDef::Named(name) => { - let f = cx.declare_cfn( name, - cx.type_func(&inputs, outputs)); + let f = cx.declare_cfn( + name, + cx.type_func(&inputs, outputs) + ); self.call(f, &llargs, None) } }; diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs index 
107903ab1eaa2..0fabcf7cfd6b0 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -56,7 +56,10 @@ pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<' analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { +struct LocalAnalyzer< + 'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, + Cx: 'a + CodegenMethods<'ll, 'tcx> + > { fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>, dominators: Dominators, non_ssa_locals: BitSet, @@ -109,9 +112,9 @@ impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, } } -impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Visitor<'tcx> - for LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> - where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where + &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn visit_assign(&mut self, block: mir::BasicBlock, diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 3cfa3aa0265d1..94a6df294c57a 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -77,8 +77,9 @@ pub fn codegen_static_initializer( Ok((const_alloc_to_llvm(cx, alloc), alloc)) } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> - where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn fully_evaluate>( &mut self, diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 5076b80483ace..879d27ff361a2 
100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -283,7 +283,8 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( if let Some(name) = decl.name { // User variable let debug_scope = fx.scopes[decl.visibility_scope]; - let dbg = debug_scope.is_valid() && bx.cx().sess().opts.debuginfo == DebugInfo::Full; + let dbg = + debug_scope.is_valid() && bx.cx().sess().opts.debuginfo == DebugInfo::Full; if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index ccd090799ec96..11dc6cf61e7dd 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -384,7 +384,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> +{ fn maybe_codegen_consume_direct>( &mut self, bx: &Bx, diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index b5c4a471a310c..a2a578585da60 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -395,8 +395,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> - where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_place>( &mut self, diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 
c420c55ffac7d..939b369bb7b5f 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -28,8 +28,9 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> - where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_rvalue>( &mut self, @@ -723,8 +724,9 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Functi } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> - where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs index 6b2f4713d7723..1e6e784d33b6c 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -18,8 +18,9 @@ use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; use interfaces::*; use value::Value; -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> - where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn codegen_statement>( &mut self, From 
48af59f49e161de6502dc15f5ea48eaa6ffa116a Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 21 Sep 2018 16:13:15 +0200 Subject: [PATCH 51/76] Generalized mono_item.rs and base.rs:codegen_instance --- src/librustc_codegen_llvm/base.rs | 39 +++-- src/librustc_codegen_llvm/context.rs | 8 + .../interfaces/declare.rs | 20 +++ src/librustc_codegen_llvm/interfaces/misc.rs | 5 + src/librustc_codegen_llvm/interfaces/mod.rs | 5 +- src/librustc_codegen_llvm/mono_item.rs | 155 +++++++++--------- 6 files changed, 139 insertions(+), 93 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index e97aca65c05e2..d224471b474ef 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -89,28 +89,33 @@ use value::Value; use mir::operand::OperandValue; use rustc_codegen_utils::check_for_rustc_errors_attr; +use std::marker::PhantomData; -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, +pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { + cx: &'a Cx, name: Option, istart: usize, + phantom: PhantomData<(&'ll (), &'tcx ())> } -impl StatRecorder<'a, 'll, 'tcx> { - pub fn new(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, name: String) -> Self { - let istart = cx.stats.borrow().n_llvm_insns; +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> { + pub fn new(cx: &'a Cx, name: String) -> Self { + let istart = cx.stats().borrow().n_llvm_insns; StatRecorder { cx, name: Some(name), istart, + phantom: PhantomData } } } -impl Drop for StatRecorder<'a, 'll, 'tcx> { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Drop for + StatRecorder<'a, 'll, 'tcx, Cx> +{ fn drop(&mut self) { if self.cx.sess().codegen_stats() { - let mut stats = self.cx.stats.borrow_mut(); + let mut stats = self.cx.stats().borrow_mut(); let iend = stats.n_llvm_insns; stats.fn_stats.push((self.name.take().unwrap(), 
iend - self.istart)); stats.n_fns += 1; @@ -451,13 +456,13 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx> bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); } -pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>( - cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, instance: Instance<'tcx> -) { +) where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { let _s = if cx.sess().codegen_stats() { let mut instance_name = String::new(); - DefPathBasedNames::new(cx.tcx, true, true) + DefPathBasedNames::new(*cx.tcx(), true, true) .push_def_path(instance.def_id(), &mut instance_name); Some(StatRecorder::new(cx, instance_name)) } else { @@ -469,17 +474,17 @@ pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll>( // release builds. info!("codegen_instance({})", instance); - let fn_ty = instance.ty(cx.tcx); + let fn_ty = instance.ty(*cx.tcx()); let sig = common::ty_fn_sig(cx, fn_ty); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - let lldecl = cx.instances.borrow().get(&instance).cloned().unwrap_or_else(|| + let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(|| bug!("Instance `{:?}` not already declared", instance)); - cx.stats.borrow_mut().n_closures += 1; + cx.stats().borrow_mut().n_closures += 1; - let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir::<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>>( + let mir = cx.tcx().instance_mir(instance.def); + mir::codegen_mir::<'a, 'll, 'tcx, Bx>( cx, lldecl, &mir, instance, sig ); } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 2cbbf6d90b63e..5b30ae29743cc 100644 --- a/src/librustc_codegen_llvm/context.rs +++ 
b/src/librustc_codegen_llvm/context.rs @@ -418,6 +418,14 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn check_overflow(&self) -> bool { self.check_overflow } + + fn stats(&self) -> &RefCell { + &self.stats + } + + fn codegen_unit(&self) -> &Arc> { + &self.codegen_unit + } } impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_llvm/interfaces/declare.rs index 42d74febc2ff9..f3fcec0bb78df 100644 --- a/src/librustc_codegen_llvm/interfaces/declare.rs +++ b/src/librustc_codegen_llvm/interfaces/declare.rs @@ -10,6 +10,9 @@ use rustc::ty::Ty; use super::backend::Backend; +use rustc::hir::def_id::DefId; +use rustc::mir::mono::{Linkage, Visibility}; +use monomorphize::Instance; pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn declare_global( @@ -45,3 +48,20 @@ pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn get_declared_value(&self, name: &str) -> Option; fn get_defined_value(&self, name: &str) -> Option; } + +pub trait PreDefineMethods<'ll, 'tcx: 'll> : Backend<'ll> { + fn predefine_static( + &self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str + ); + fn predefine_fn( + &self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str + ); +} diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index 22a277fbd98b6..57642da09f901 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -14,6 +14,9 @@ use rustc::ty::{Ty, self, Instance}; use super::backend::Backend; use rustc::session::Session; use libc::c_uint; +use rustc::mir::mono::Stats; +use std::sync::Arc; +use monomorphize::partitioning::CodegenUnit; pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn vtables(&self) -> &RefCell, @@ -25,4 +28,6 @@ pub trait 
MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn eh_personality(&self) -> Self::Value; fn eh_unwind_resume(&self) -> Self::Value; fn sess(&self) -> &Session; + fn stats(&self) -> &RefCell; + fn codegen_unit(&self) -> &Arc>; } diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index bba5243e7d006..3477437f4c0ba 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -30,7 +30,7 @@ pub use self::statics::StaticMethods; pub use self::misc::MiscMethods; pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; pub use self::abi::{AbiMethods, AbiBuilderMethods}; -pub use self::declare::DeclareMethods; +pub use self::declare::{DeclareMethods, PreDefineMethods}; pub use self::asm::{AsmMethods, AsmBuilderMethods}; use std::fmt; @@ -38,6 +38,7 @@ use std::fmt; pub trait CodegenMethods<'ll, 'tcx: 'll> : Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + - IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods {} + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + + PreDefineMethods<'ll, 'tcx> {} pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index cb0a4dfecc301..434191e338af5 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -24,26 +24,30 @@ use rustc::hir; use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; -use rustc::ty::TypeFoldable; -use rustc::ty::layout::LayoutOf; +use rustc::ty::{TypeFoldable, Ty}; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use std::fmt; use value::Value; +use builder::Builder; use interfaces::*; pub use rustc::mir::mono::MonoItem; pub use 
rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; -pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { - fn define(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) { +pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> : + fmt::Debug + BaseMonoItemExt<'ll, 'tcx> where + &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn define(&self, cx: &'a Bx::CodegenCx) { debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), + self.to_string(*cx.tcx()), self.to_raw_string(), - cx.codegen_unit.name()); + cx.codegen_unit().name()); match *self.as_mono_item() { MonoItem::Static(def_id) => { - let tcx = cx.tcx; + let tcx = *cx.tcx(); let is_mutable = match tcx.describe_def(def_id) { Some(Def::Static(_, is_mutable)) => is_mutable, Some(other) => { @@ -56,7 +60,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { cx.codegen_static(def_id, is_mutable); } MonoItem::GlobalAsm(node_id) => { - let item = cx.tcx.hir.expect_item(node_id); + let item = cx.tcx().hir.expect_item(node_id); if let hir::ItemKind::GlobalAsm(ref ga) = item.node { cx.codegen_global_asm(ga); } else { @@ -64,43 +68,43 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { } } MonoItem::Fn(instance) => { - base::codegen_instance(&cx, instance); + base::codegen_instance::<'a, 'll, 'tcx, Bx>(&cx, instance); } } debug!("END IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), + self.to_string(*cx.tcx()), self.to_raw_string(), - cx.codegen_unit.name()); + cx.codegen_unit().name()); } fn predefine(&self, - cx: &CodegenCx<'a, 'tcx, &'a Value>, + cx: &'a Bx::CodegenCx, linkage: Linkage, visibility: Visibility) { debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), + self.to_string(*cx.tcx()), self.to_raw_string(), - cx.codegen_unit.name()); + cx.codegen_unit().name()); - let symbol_name = self.symbol_name(cx.tcx).as_str(); + let symbol_name = 
self.symbol_name(*cx.tcx()).as_str(); debug!("symbol {}", &symbol_name); match *self.as_mono_item() { MonoItem::Static(def_id) => { - predefine_static(cx, def_id, linkage, visibility, &symbol_name); + cx.predefine_static(def_id, linkage, visibility, &symbol_name); } MonoItem::Fn(instance) => { - predefine_fn(cx, instance, linkage, visibility, &symbol_name); + cx.predefine_fn(instance, linkage, visibility, &symbol_name); } MonoItem::GlobalAsm(..) => {} } debug!("END PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), + self.to_string(*cx.tcx()), self.to_raw_string(), - cx.codegen_unit.name()); + cx.codegen_unit().name()); } fn to_raw_string(&self) -> String { @@ -120,68 +124,71 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { } } -impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} +impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>> + for MonoItem<'tcx> {} -fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, - def_id: DefId, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - - let g = cx.define_global(symbol_name, llty).unwrap_or_else(|| { - cx.sess().span_fatal(cx.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); - } +impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn predefine_static(&self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); - cx.instances.borrow_mut().insert(instance, g); -} + let g = self.define_global(symbol_name, 
llty).unwrap_or_else(|| { + self.sess().span_fatal(self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); -fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_ty = instance.ty(cx.tcx); - let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = cx.declare_fn(symbol_name, mono_ty); - unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; - base::set_link_section(lldecl, &attrs); - if linkage == Linkage::LinkOnceODR || - linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(cx.llmod, lldecl); + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); + } + + self.instances.borrow_mut().insert(instance, g); } - // If we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. 
- if linkage != Linkage::Internal && linkage != Linkage::Private && - cx.tcx.is_compiler_builtins(LOCAL_CRATE) { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + fn predefine_fn(&self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_ty = instance.ty(self.tcx); + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = self.declare_fn(symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, lldecl); } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. 
+ if linkage != Linkage::Internal && linkage != Linkage::Private && + self.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + } + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + } } - } - debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); - if instance.def.is_inline(cx.tcx) { - attributes::inline(cx, lldecl, attributes::InlineAttr::Hint); - } - attributes::from_fn_attrs(cx, lldecl, Some(instance.def.def_id())); + debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); + if instance.def.is_inline(self.tcx) { + attributes::inline(self, lldecl, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); - cx.instances.borrow_mut().insert(instance, lldecl); + self.instances.borrow_mut().insert(instance, lldecl); + } } From 8ea03316c4785a02f226fe81e96ea9f143d27608 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 24 Sep 2018 10:51:13 +0200 Subject: [PATCH 52/76] Adapt code to latest rustc master changes --- .../debuginfo/metadata.rs | 67 +------------------ src/librustc_codegen_llvm/debuginfo/mod.rs | 4 +- src/librustc_codegen_llvm/declare.rs | 2 +- 3 files changed, 4 insertions(+), 69 deletions(-) diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 2efc706f27467..c143a1914da63 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -1774,13 +1774,7 @@ pub fn create_vtable_metadata( llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), NO_SCOPE_METADATA, name.as_ptr(), - // LLVM 3.9 - // doesn't accept - // null here, so - // pass the name - // as the linkage - // name. 
- name.as_ptr(), + ptr::null(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, vtable_type, @@ -1806,62 +1800,3 @@ pub fn extend_scope_to_file( file_metadata) } } - -impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - - /// Creates debug information for the given vtable, which is for the - /// given type. - /// - /// Adds the created metadata nodes directly to the crate's IR. - fn create_vtable_metadata( - &self, - ty: ty::Ty<'tcx>, - vtable: &'ll Value, - ) { - if self.dbg_cx.is_none() { - return; - } - - let type_metadata = type_metadata(&self, ty, syntax_pos::DUMMY_SP); - - unsafe { - // LLVMRustDIBuilderCreateStructType() wants an empty array. A null - // pointer will lead to hard to trace and debug LLVM assertions - // later on in llvm/lib/IR/Value.cpp. - let empty_array = create_DIArray(DIB(&self), &[]); - - let name = const_cstr!("vtable"); - - // Create a new one each time. We don't want metadata caching - // here, because each vtable will refer to a unique containing - // type. 
- let vtable_type = llvm::LLVMRustDIBuilderCreateStructType( - DIB(&self), - NO_SCOPE_METADATA, - name.as_ptr(), - unknown_file_metadata(&self), - UNKNOWN_LINE_NUMBER, - Size::ZERO.bits(), - self.tcx.data_layout.pointer_align.abi_bits() as u32, - DIFlags::FlagArtificial, - None, - empty_array, - 0, - Some(type_metadata), - name.as_ptr() - ); - - llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(&self), - NO_SCOPE_METADATA, - name.as_ptr(), - ptr::null(), - unknown_file_metadata(&self), - UNKNOWN_LINE_NUMBER, - vtable_type, - true, - vtable, - None, - 0); - } - } -} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 1df0b1f36b074..0e73196b0f0a9 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -356,13 +356,13 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V let mut flags = DIFlags::FlagPrototyped; let local_id = self.tcx().hir.as_local_node_id(def_id); - if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() { + if let Some((id, _, _)) = *self.sess().entry_fn.borrow() { if local_id == Some(id) { flags |= DIFlags::FlagMainSubprogram; } } - if cx.layout_of(sig.output()).abi.is_uninhabited() { + if self.layout_of(sig.output()).abi.is_uninhabited() { flags |= DIFlags::FlagNoReturn; } diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index ccde179f05310..112b8dc9e58c2 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -146,7 +146,7 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let fty = FnType::new(self, sig, &[]); let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); - if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited { + if self.layout_of(sig.output()).abi.is_uninhabited() { llvm::Attribute::NoReturn.apply_llfn(Function, llfn); } From 2e8af91b4b70d8dfa7f9c5958d91baa6b99fd9de Mon 
Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 24 Sep 2018 15:26:39 +0200 Subject: [PATCH 53/76] Generalized base:maybe_create_entry_wrapper --- src/librustc_codegen_llvm/base.rs | 49 ++++++++++--------- src/librustc_codegen_llvm/context.rs | 8 +++ src/librustc_codegen_llvm/debuginfo/mod.rs | 3 ++ .../interfaces/debuginfo.rs | 1 + src/librustc_codegen_llvm/interfaces/misc.rs | 2 + 5 files changed, 39 insertions(+), 24 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index d224471b474ef..f1b0b2ea4b47b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -30,7 +30,7 @@ use super::CachedModuleCodegen; use abi; use back::write::{self, OngoingCodegen}; -use llvm::{self, get_param}; +use llvm; use metadata; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; @@ -50,7 +50,6 @@ use rustc::session::Session; use rustc_incremental; use allocator; use mir::place::PlaceRef; -use attributes; use builder::{Builder, MemFlags}; use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; @@ -502,48 +501,50 @@ pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { /// Create the `main` function which will initialize the rust runtime and call /// users main function. 
-fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { +fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx +) { let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { Some((id, span, _)) => { - (cx.tcx.hir.local_def_id(id), span) + (cx.tcx().hir.local_def_id(id), span) } None => return, }; - let instance = Instance::mono(cx.tcx, main_def_id); + let instance = Instance::mono(*cx.tcx(), main_def_id); - if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) { + if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { // We want to create the wrapper in the same codegen unit as Rust's main // function. return; } - let main_llfn = callee::get_fn(cx, instance); + let main_llfn = cx.get_fn(instance); let et = cx.sess().entry_fn.get().map(|e| e.2); match et { - Some(EntryFnType::Main) => create_entry_fn(cx, span, main_llfn, main_def_id, true), - Some(EntryFnType::Start) => create_entry_fn(cx, span, main_llfn, main_def_id, false), + Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), None => {} // Do nothing. } - fn create_entry_fn( - cx: &CodegenCx<'ll, '_, &'ll Value>, + fn create_entry_fn<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, sp: Span, - rust_main: &'ll Value, + rust_main: >::Value, rust_main_def_id: DefId, use_start_lang_item: bool, ) { let llfty = cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); - let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); + let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); // Given that `main()` has no arguments, // then its return type cannot have // late-bound regions, since late-bound // regions must appear in the argument // listing. 
- let main_ret_ty = cx.tcx.erase_regions( + let main_ret_ty = cx.tcx().erase_regions( &main_ret_ty.no_late_bound_regions().unwrap(), ); @@ -558,25 +559,25 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx<'ll, '_, &'ll Value>) { let llfn = cx.declare_cfn("main", llfty); // `main` should respect same config for frame pointer elimination as rest of code - attributes::set_frame_pointer_elimination(cx, llfn); - attributes::apply_target_cpu_attr(cx, llfn); + cx.set_frame_pointer_elimination(llfn); + cx.apply_target_cpu_attr(llfn); - let bx = Builder::new_block(cx, llfn, "top"); + let bx = Bx::new_block(&cx, llfn, "top"); - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx); + bx.insert_reference_to_gdb_debug_scripts_section_global(); // Params from native main() used as args for rust start function - let param_argc = get_param(llfn, 0); - let param_argv = get_param(llfn, 1); - let arg_argc = bx.intcast(param_argc, cx.isize_ty, true); + let param_argc = cx.get_param(llfn, 0); + let param_argv = cx.get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); let arg_argv = param_argv; let (start_fn, args) = if use_start_lang_item { - let start_def_id = cx.tcx.require_lang_item(StartFnLangItem); + let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); let start_fn = callee::resolve_and_get_fn( cx, start_def_id, - cx.tcx.intern_substs(&[main_ret_ty.into()]), + cx.tcx().intern_substs(&[main_ret_ty.into()]), ); (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv]) @@ -1212,7 +1213,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // If this codegen unit contains the main function, also create the // wrapper here - maybe_create_entry_wrapper(&cx); + maybe_create_entry_wrapper::>(&cx); // Run replace-all-uses-with for statics that need it for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { diff --git a/src/librustc_codegen_llvm/context.rs 
b/src/librustc_codegen_llvm/context.rs index 5b30ae29743cc..ca877f5a3450c 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -426,6 +426,14 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn codegen_unit(&self) -> &Arc> { &self.codegen_unit } + + fn set_frame_pointer_elimination(&self, llfn: &'ll Value) { + attributes::set_frame_pointer_elimination(self, llfn) + } + + fn apply_target_cpu_attr(&self, llfn: &'ll Value) { + attributes::apply_target_cpu_attr(self, llfn) + } } impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 0e73196b0f0a9..9f973687a6b3f 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -281,6 +281,9 @@ impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> ) { set_source_location(debug_context, &self, scope, span) } + fn insert_reference_to_gdb_debug_scripts_section_global(&self) { + gdb::insert_reference_to_gdb_debug_scripts_section_global(self) + } } impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs index e850c41500136..7e4941d5f71a1 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -64,4 +64,5 @@ pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, scope: Option<>::DIScope>, span: syntax_pos::Span, ); + fn insert_reference_to_gdb_debug_scripts_section_global(&self); } diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index 57642da09f901..dceda584c2f17 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -30,4 
+30,6 @@ pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn sess(&self) -> &Session; fn stats(&self) -> &RefCell; fn codegen_unit(&self) -> &Arc>; + fn set_frame_pointer_elimination(&self, llfn: Self::Value); + fn apply_target_cpu_attr(&self, llfn: Self::Value); } From 44609727361c4778d0d8d01c0cbdf5d42e3d6f6d Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 24 Sep 2018 17:35:39 +0200 Subject: [PATCH 54/76] Move doc to trait declarations --- src/librustc_codegen_llvm/asm.rs | 1 - src/librustc_codegen_llvm/base.rs | 68 +++++++++---------- src/librustc_codegen_llvm/builder.rs | 10 --- src/librustc_codegen_llvm/callee.rs | 4 +- src/librustc_codegen_llvm/common.rs | 6 -- src/librustc_codegen_llvm/context.rs | 3 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 6 -- src/librustc_codegen_llvm/declare.rs | 40 ----------- src/librustc_codegen_llvm/interfaces/asm.rs | 1 + .../interfaces/builder.rs | 12 ++++ .../interfaces/consts.rs | 8 +++ .../interfaces/debuginfo.rs | 8 +++ .../interfaces/declare.rs | 46 +++++++++++++ .../interfaces/intrinsic.rs | 6 ++ src/librustc_codegen_llvm/interfaces/type_.rs | 14 ++++ src/librustc_codegen_llvm/intrinsic.rs | 3 - src/librustc_codegen_llvm/type_.rs | 7 -- 17 files changed, 131 insertions(+), 112 deletions(-) diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 032b7d2b7d6df..914391226e2ce 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -25,7 +25,6 @@ use libc::{c_uint, c_char}; impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { - // Take an inline assembly expression and splat it out via LLVM fn codegen_inline_asm( &self, ia: &hir::InlineAsm, diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index f1b0b2ea4b47b..a97d096bb79c7 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -158,14 +158,14 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> 
RealPredicate { } } -pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - lhs: >::Value, - rhs: >::Value, +pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs: >::Value, + rhs: >::Value, t: Ty<'tcx>, - ret_ty: >::Type, + ret_ty: >::Type, op: hir::BinOpKind -) -> >::Value { +) -> >::Value { let signed = match t.sty { ty::Float(_) => { let cmp = bin_op_to_fcmp_predicate(op); @@ -337,31 +337,31 @@ pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' } } -pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, +pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, op: hir::BinOpKind, - lhs: >::Value, - rhs: >::Value -) -> >::Value { + lhs: >::Value, + rhs: >::Value +) -> >::Value { cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) } -fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, +fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, op: hir::BinOpKind, - lhs: >::Value, - rhs: >::Value, + lhs: >::Value, + rhs: >::Value, trunc: F, zext: G -) -> >::Value +) -> >::Value where F: FnOnce( - >::Value, - >::Type - ) -> >::Value, + >::Value, + >::Type + ) -> >::Value, G: FnOnce( - >::Value, - >::Type - ) -> >::Value + >::Value, + >::Type + ) -> >::Value { // Shifts may have any size int on the rhs if op.is_shift() { @@ -417,32 +417,32 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx> } } -pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - val: >::Value, +pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + val: >::Value, layout: layout::TyLayout, -) -> >::Value { +) -> >::Value { if let 
layout::Abi::Scalar(ref scalar) = layout.abi { return to_immediate_scalar(bx, val, scalar); } val } -pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - val: >::Value, +pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + val: >::Value, scalar: &layout::Scalar, -) -> >::Value { +) -> >::Value { if scalar.is_bool() { return bx.trunc(val, bx.cx().type_i1()); } val } -pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Builder : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Builder, - dst: >::Value, - src: >::Value, +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + dst: >::Value, + src: >::Value, layout: TyLayout<'tcx>, align: Align, flags: MemFlags, diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 6ec0e38c1caad..19eb5f8caf85f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1232,7 +1232,6 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - /// Returns the ptr value that should be used for storing `val`. fn check_store<'b>(&self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { @@ -1252,7 +1251,6 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - /// Returns the args that should be used for a call to `llfn`. fn check_call<'b>(&self, typ: &str, llfn: &'ll Value, @@ -1303,14 +1301,6 @@ impl BuilderMethods<'a, 'll, 'tcx> self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } - /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations - /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - /// and the intrinsic for `lt` and passes them to `emit`, which is in - /// charge of generating code to call the passed intrinsic on whatever - /// block of generated code is targeted for the intrinsic. - /// - /// If LLVM lifetime intrinsic support is disabled (i.e. 
optimizations - /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 63bd23e90255b..5df3440abff40 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -204,9 +204,7 @@ pub fn get_fn( llfn } -pub fn resolve_and_get_fn<'ll, 'tcx: 'll, - Cx : Backend<'ll> + MiscMethods<'ll, 'tcx> + TypeMethods<'ll, 'tcx> - >( +pub fn resolve_and_get_fn<'ll, 'tcx: 'll, Cx : CodegenMethods<'ll, 'tcx>>( cx: &Cx, def_id: DefId, substs: &'tcx Substs<'tcx>, diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 386ef00c120b6..45cc272753d76 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -226,7 +226,6 @@ impl Backend<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { impl<'ll, 'tcx : 'll> ConstMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - // LLVM constant constructors. fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) @@ -288,9 +287,6 @@ impl<'ll, 'tcx : 'll> ConstMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Valu &self.const_uint(&self.type_i8(), i as u64) } - - // This is a 'c-like' raw string, which differs from - // our boxed-and-length-annotated strings. fn const_cstr( &self, s: LocalInternedString, @@ -318,8 +314,6 @@ impl<'ll, 'tcx : 'll> ConstMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Valu } } - // NB: Do not use `do_spill_noroot` to make this into a constant string, or - // you will be kicked off fast isel. See issue #4352 for an example of this. 
fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { let len = s.len(); let cs = consts::ptrcast(&self.const_cstr(s, false), diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index ca877f5a3450c..6e45f920a5917 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -446,8 +446,7 @@ impl IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> { declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } - - /// Declare any llvm intrinsics that you might need + fn declare_intrinsic( &self, key: &str diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 9f973687a6b3f..6e93039e273fa 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -290,12 +290,6 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V type DIScope = &'ll DIScope; - /// Creates the function-specific debug context. - /// - /// Returns the FunctionDebugContext for the function which holds state needed - /// for debug info creation. The function may also return another variant of the - /// FunctionDebugContext enum which indicates why no debuginfo should be created - /// for the function. fn create_function_debug_context( &self, instance: Instance<'tcx>, diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 112b8dc9e58c2..ab87490b74f5c 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -98,10 +98,6 @@ fn declare_raw_fn( impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - /// Declare a global value. - /// - /// If there’s a value with the same name already declared, the function will - /// return its Value instead. 
fn declare_global( &self, name: &str, ty: &'ll Type @@ -113,13 +109,6 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Declare a C ABI function. - /// - /// Only use this for foreign function ABIs and glue. For Rust functions use - /// `declare_fn` instead. - /// - /// If there’s a value with the same name already declared, the function will - /// update the declaration and return existing Value instead. fn declare_cfn( &self, name: &str, @@ -128,11 +117,6 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { declare_raw_fn(self, name, llvm::CCallConv, fn_type) } - - /// Declare a Rust function. - /// - /// If there’s a value with the same name already declared, the function will - /// update the declaration and return existing Value instead. fn declare_fn( &self, name: &str, @@ -159,13 +143,6 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { llfn } - - /// Declare a global with an intention to define it. - /// - /// Use this function when you intend to define a global. This function will - /// return None if the name already has a definition associated with it. In that - /// case an error should be reported to the user, because it usually happens due - /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). fn define_global( &self, name: &str, @@ -178,20 +155,12 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Declare a private global - /// - /// Use this function when you intend to define a global without a name. fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) } } - /// Declare a Rust function with an intention to define it. - /// - /// Use this function when you intend to define a function. This function will - /// return panic if the name already has a definition associated with it. This - /// can happen with #[no_mangle] or #[export_name], for example. 
fn define_fn( &self, name: &str, @@ -204,11 +173,6 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Declare a Rust function with an intention to define it. - /// - /// Use this function when you intend to define a function. This function will - /// return panic if the name already has a definition associated with it. This - /// can happen with #[no_mangle] or #[export_name], for example. fn define_internal_fn( &self, name: &str, @@ -219,16 +183,12 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { llfn } - - /// Get declared value by name. fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { debug!("get_declared_value(name={:?})", name); let namebuf = SmallCStr::new(name); unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) } } - /// Get defined or externally defined (AvailableExternally linkage) value by - /// name. fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { self.get_declared_value(name).and_then(|val|{ let declaration = unsafe { diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs index bdd822a43ba8e..77b1b133f83af 100644 --- a/src/librustc_codegen_llvm/interfaces/asm.rs +++ b/src/librustc_codegen_llvm/interfaces/asm.rs @@ -14,6 +14,7 @@ use super::backend::Backend; use super::builder::HasCodegen; pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ + // Take an inline assembly expression and splat it out via LLVM fn codegen_inline_asm( &self, ia: &InlineAsm, diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 91e0216606ea6..b5c6b453b401c 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -579,11 +579,14 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ); fn set_invariant_load(&self, load: >::Value); + /// Returns the ptr 
value that should be used for storing `val`. fn check_store( &self, val: >::Value, ptr: >::Value ) -> >::Value; + + /// Returns the args that should be used for a call to `llfn`. fn check_call<'b>( &self, typ: &str, @@ -591,9 +594,18 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + args: &'b [>::Value] ) -> Cow<'b, [>::Value]> where [>::Value] : ToOwned; + fn lifetime_start(&self, ptr: >::Value, size: Size); fn lifetime_end(&self, ptr: >::Value, size: Size); + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targeted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). fn call_lifetime_intrinsic( &self, intrinsic: &str, diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_llvm/interfaces/consts.rs index 9835e5c215375..9df3468c51cb4 100644 --- a/src/librustc_codegen_llvm/interfaces/consts.rs +++ b/src/librustc_codegen_llvm/interfaces/consts.rs @@ -17,6 +17,7 @@ use mir::place::PlaceRef; pub trait ConstMethods<'ll, 'tcx: 'll> : Backend<'ll> { // Constant constructors + fn const_null(&self, t: Self::Type) -> Self::Value; fn const_undef(&self, t: Self::Type) -> Self::Value; fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; @@ -28,12 +29,19 @@ pub trait ConstMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn const_u64(&self, i: u64) -> Self::Value; fn const_usize(&self, i: u64) -> Self::Value; fn const_u8(&self, i: u8) -> Self::Value; + + // This is a 'c-like' raw string, which differs from + // our boxed-and-length-annotated strings. 
fn const_cstr( &self, s: LocalInternedString, null_terminated: bool, ) -> Self::Value; + + // NB: Do not use `do_spill_noroot` to make this into a constant string, or + // you will be kicked off fast isel. See issue #4352 for an example of this. fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; + fn const_fat_ptr( &self, ptr: Self::Value, diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs index 7e4941d5f71a1..36bcb0b67ec55 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -27,6 +27,13 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { ty: Ty<'tcx>, vtable: Self::Value, ); + + /// Creates the function-specific debug context. + /// + /// Returns the FunctionDebugContext for the function which holds state needed + /// for debug info creation. The function may also return another variant of the + /// FunctionDebugContext enum which indicates why no debuginfo should be created + /// for the function. fn create_function_debug_context( &self, instance: Instance<'tcx>, @@ -34,6 +41,7 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { llfn: Self::Value, mir: &mir::Mir, ) -> FunctionDebugContext<'ll>; + fn create_mir_scopes( &self, mir: &mir::Mir, diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_llvm/interfaces/declare.rs index f3fcec0bb78df..8b48796593770 100644 --- a/src/librustc_codegen_llvm/interfaces/declare.rs +++ b/src/librustc_codegen_llvm/interfaces/declare.rs @@ -15,37 +15,83 @@ use rustc::mir::mono::{Linkage, Visibility}; use monomorphize::Instance; pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { + + /// Declare a global value. + /// + /// If there’s a value with the same name already declared, the function will + /// return its Value instead. fn declare_global( &self, name: &str, ty: Self::Type ) -> Self::Value; + + /// Declare a C ABI function. 
+ /// + /// Only use this for foreign function ABIs and glue. For Rust functions use + /// `declare_fn` instead. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. fn declare_cfn( &self, name: &str, fn_type: Self::Type ) -> Self::Value; + + /// Declare a Rust function. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. fn declare_fn( &self, name: &str, fn_type: Ty<'tcx>, ) -> Self::Value; + + /// Declare a global with an intention to define it. + /// + /// Use this function when you intend to define a global. This function will + /// return None if the name already has a definition associated with it. In that + /// case an error should be reported to the user, because it usually happens due + /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). fn define_global( &self, name: &str, ty: Self::Type ) -> Option; + + /// Declare a private global + /// + /// Use this function when you intend to define a global without a name. fn define_private_global(&self, ty: Self::Type) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. fn define_fn( &self, name: &str, fn_type: Ty<'tcx>, ) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. 
fn define_internal_fn( &self, name: &str, fn_type: Ty<'tcx>, ) -> Self::Value; + + /// Get declared value by name. fn get_declared_value(&self, name: &str) -> Option; + + /// Get defined or externally defined (AvailableExternally linkage) value by + /// name. fn get_defined_value(&self, name: &str) -> Option; } diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs index 5de37c878a02e..1a8f4db900c81 100644 --- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -16,6 +16,10 @@ use abi::FnType; use syntax_pos::Span; pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { + + /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, + /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, + /// add them to librustc_codegen_llvm/context.rs fn codegen_intrinsic_call( &self, callee_ty: Ty<'tcx>, @@ -28,6 +32,8 @@ pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tc pub trait IntrinsicDeclarationMethods<'ll> : Backend<'ll> { fn get_intrinsic(&self, key: &str) -> Self::Value; + + /// Declare any llvm intrinsics that you might need fn declare_intrinsic( &self, key: &str diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 45c84f520f19e..266c14a4af22b 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -30,7 +30,10 @@ pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn type_i32(&self) -> Self::Type; fn type_i64(&self) -> Self::Type; fn type_i128(&self) -> Self::Type; + + // Creates an integer type with the given number of bits, e.g. 
i24 fn type_ix(&self, num_bits: u64) -> Self::Type; + fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; fn type_x86_mmx(&self) -> Self::Type; @@ -45,9 +48,14 @@ pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; + + /// Return the number of elements in `self` if it is a LLVM vector type. fn vector_length(&self, ty: Self::Type) -> usize; + fn func_params_types(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; + + /// Retrieve the bit width of the integer type `self`. fn int_width(&self, ty: Self::Type) -> u64; fn val_ty(&self, v: Self::Value) -> Self::Type; @@ -74,7 +82,13 @@ pub trait DerivedTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { t: ast::FloatTy ) -> Self::Type; fn type_from_integer(&self, i: layout::Integer) -> Self::Type; + + /// Return a LLVM type that has at most the required alignment, + /// as a conservative approximation for unknown pointee types. fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type; + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. 
fn type_padding_filler( &self, size: Size, diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index d919a2deb1e86..3e875fd418d7c 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -87,9 +87,6 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Opti impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { - /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, - /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, - /// add them to librustc_codegen_llvm/context.rs fn codegen_intrinsic_call( &self, callee_ty: Ty<'tcx>, diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 2e29650b12a08..0d6ba8953172d 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -103,7 +103,6 @@ impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - // Creates an integer type with the given number of bits, e.g. i24 fn type_ix(&self, num_bits: u64) -> &'ll Type { unsafe { llvm::LLVMIntTypeInContext(&self.llcx, num_bits as c_uint) @@ -205,7 +204,6 @@ impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Return the number of elements in `self` if it is a LLVM vector type. fn vector_length(&self, ty: &'ll Type) -> usize { unsafe { llvm::LLVMGetVectorSize(ty) as usize @@ -232,7 +230,6 @@ impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Retrieve the bit width of the integer type `self`. fn int_width(&self, ty: &'ll Type) -> u64 { unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 @@ -356,16 +353,12 @@ impl DerivedTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } - /// Return a LLVM type that has at most the required alignment, - /// as a conservative approximation for unknown pointee types. 
fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = layout::Integer::approximate_abi_align(self, align); &self.type_from_integer(ity) } - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. fn type_padding_filler( &self, size: Size, From df5a57c2a77c1aaaf8e77fa1698a4c45c21d43ec Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 25 Sep 2018 17:52:03 +0200 Subject: [PATCH 55/76] Generalized base:codegen_crate --- src/librustc_codegen_llvm/back/lto.rs | 15 ++-- src/librustc_codegen_llvm/back/write.rs | 22 +++--- src/librustc_codegen_llvm/base.rs | 62 ++++++++-------- .../interfaces/backend.rs | 41 +++++++++++ src/librustc_codegen_llvm/interfaces/mod.rs | 2 +- src/librustc_codegen_llvm/lib.rs | 70 +++++++++++++++++-- 6 files changed, 155 insertions(+), 57 deletions(-) diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 61856236a1491..a8192731b9a71 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -14,8 +14,7 @@ use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename}; use errors::{FatalError, Handler}; use llvm::archive_ro::ArchiveRO; -use llvm::{True, False}; -use llvm; +use llvm::{self, True, False}; use memmap; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; @@ -49,7 +48,7 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { pub(crate) enum LtoModuleCodegen { Fat { - module: Option, + module: Option>, _serialized_bitcode: Vec, }, @@ -73,7 +72,7 @@ impl LtoModuleCodegen { pub(crate) unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result + -> Result, FatalError> { match *self { LtoModuleCodegen::Fat { ref mut module, .. 
} => { @@ -108,7 +107,7 @@ impl LtoModuleCodegen { /// the need optimization and another for modules that can simply be copied over /// from the incr. comp. cache. pub(crate) fn run(cgcx: &CodegenContext, - modules: Vec, + modules: Vec>, cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) -> Result<(Vec, Vec), FatalError> @@ -232,7 +231,7 @@ pub(crate) fn run(cgcx: &CodegenContext, fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec, + mut modules: Vec>, mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) @@ -388,7 +387,7 @@ impl Drop for Linker<'a> { /// they all go out of scope. fn thin_lto(cgcx: &CodegenContext, diag_handler: &Handler, - modules: Vec, + modules: Vec>, serialized_modules: Vec<(SerializedModule, CString)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, symbol_white_list: &[*const libc::c_char], @@ -736,7 +735,7 @@ impl ThinModule { } unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result + -> Result, FatalError> { let diag_handler = cgcx.create_diag_handler(); let tm = (cgcx.tm_factory)().map_err(|e| { diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 741e0840be167..4fccc693aebbd 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -29,7 +29,7 @@ use rustc::util::nodemap::FxHashMap; use time_graph::{self, TimeGraph, Timeline}; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, +use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, ModuleLlvm, CachedModuleCodegen}; use CrateInfo; use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; @@ -404,7 +404,7 @@ impl CodegenContext { } } - pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { + pub(crate) fn 
save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { if !self.save_temps { return } @@ -511,7 +511,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void // Unsafe due to LLVM calls. unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, - module: &ModuleCodegen, + module: &ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result<(), FatalError> @@ -644,7 +644,7 @@ unsafe fn optimize(cgcx: &CodegenContext, } fn generate_lto_work(cgcx: &CodegenContext, - modules: Vec, + modules: Vec>, import_only_modules: Vec<(SerializedModule, WorkProduct)>) -> Vec<(WorkItem, u64)> { @@ -673,7 +673,7 @@ fn generate_lto_work(cgcx: &CodegenContext, unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, - module: ModuleCodegen, + module: ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result @@ -1288,7 +1288,7 @@ pub(crate) fn dump_incremental_data(_codegen_results: &CodegenResults) { enum WorkItem { /// Optimize a newly codegened, totally unoptimized module. - Optimize(ModuleCodegen), + Optimize(ModuleCodegen), /// Copy the post-LTO artifacts from the incremental cache to the output /// directory. 
CopyPostLtoArtifacts(CachedModuleCodegen), @@ -1316,7 +1316,7 @@ impl WorkItem { enum WorkItemResult { Compiled(CompiledModule), - NeedsLTO(ModuleCodegen), + NeedsLTO(ModuleCodegen), } fn execute_work_item(cgcx: &CodegenContext, @@ -1340,7 +1340,7 @@ fn execute_work_item(cgcx: &CodegenContext, } fn execute_optimize_work_item(cgcx: &CodegenContext, - module: ModuleCodegen, + module: ModuleCodegen, module_config: &ModuleConfig, timeline: &mut Timeline) -> Result @@ -1491,7 +1491,7 @@ fn execute_lto_work_item(cgcx: &CodegenContext, enum Message { Token(io::Result), NeedsLTO { - result: ModuleCodegen, + result: ModuleCodegen, worker_id: usize, }, Done { @@ -2431,7 +2431,7 @@ impl OngoingCodegen { pub(crate) fn submit_pre_codegened_module_to_llvm(&self, tcx: TyCtxt, - module: ModuleCodegen) { + module: ModuleCodegen) { self.wait_for_signal_to_codegen_item(); self.check_for_errors(tcx.sess); @@ -2465,7 +2465,7 @@ impl OngoingCodegen { } pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, - module: ModuleCodegen, + module: ModuleCodegen, cost: u64) { let llvm_work_item = WorkItem::Optimize(module); drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index a97d096bb79c7..0f12fa27fceb4 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -29,7 +29,7 @@ use super::ModuleKind; use super::CachedModuleCodegen; use abi; -use back::write::{self, OngoingCodegen}; +use back::write; use llvm; use metadata; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; @@ -48,7 +48,6 @@ use rustc::util::profiling::ProfileCategory; use rustc::session::config::{self, DebugInfo, EntryFnType, Lto}; use rustc::session::Session; use rustc_incremental; -use allocator; use mir::place::PlaceRef; use builder::{Builder, MemFlags}; use callee; @@ -591,9 +590,10 @@ fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll } } -fn 
write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, - llvm_module: &ModuleLlvm) - -> EncodedMetadata { +pub(crate) fn write_metadata<'a, 'gcx>( + tcx: TyCtxt<'a, 'gcx, 'gcx>, + llvm_module: &ModuleLlvm +) -> EncodedMetadata { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -720,21 +720,23 @@ fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>) - -> OngoingCodegen -{ +pub fn codegen_crate<'a, 'tcx, B : BackendMethods>( + backend: B, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> +) -> B::OngoingCodegen { + check_for_rustc_errors_attr(tcx); if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { - if unsafe { !llvm::LLVMRustThinLTOAvailable() } { + if backend.thin_lto_available() { tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); } } if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && - unsafe { !llvm::LLVMRustPGOAvailable() } + backend.pgo_available() { tcx.sess.fatal("this compiler's LLVM does not support PGO"); } @@ -748,9 +750,9 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, &["crate"], Some("metadata")).as_str() .to_string(); - let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name); + let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); let metadata = time(tcx.sess, "write metadata", || { - write_metadata(tcx, &metadata_llvm_module) + backend.write_metadata(tcx, &metadata_llvm_module) }); tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); @@ -769,19 +771,19 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Skip crate items and just output metadata in -Z no-codegen mode. 
if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = write::start_async_codegen( + let ongoing_codegen = backend.start_async_codegen( tcx, time_graph.clone(), metadata, rx, 1); - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - ongoing_codegen.codegen_finished(tcx); + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); + backend.codegen_finished(&ongoing_codegen, tcx); assert_and_save_dep_graph(tcx); - ongoing_codegen.check_for_errors(tcx.sess); + backend.check_for_errors(&ongoing_codegen, tcx.sess); return ongoing_codegen; } @@ -802,7 +804,7 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - let ongoing_codegen = write::start_async_codegen( + let ongoing_codegen = backend.start_async_codegen( tcx, time_graph.clone(), metadata, @@ -830,11 +832,9 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, &["crate"], Some("allocator")).as_str() .to_string(); - let modules = ModuleLlvm::new(tcx.sess, &llmod_id); + let modules = backend.new_metadata(tcx.sess, &llmod_id); time(tcx.sess, "write allocator module", || { - unsafe { - allocator::codegen(tcx, &modules, kind) - } + backend.codegen_allocator(tcx, &modules, kind) }); Some(ModuleCodegen { @@ -847,10 +847,10 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; if let Some(allocator_module) = allocator_module { - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module); } - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); // We sort the codegen units by size. This way we can schedule work for LLVM // a bit more efficiently. 
@@ -864,8 +864,8 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut all_stats = Stats::default(); for cgu in codegen_units.into_iter() { - ongoing_codegen.wait_for_signal_to_codegen_item(); - ongoing_codegen.check_for_errors(tcx.sess); + backend.wait_for_signal_to_codegen_item(&ongoing_codegen); + backend.check_for_errors(&ongoing_codegen, tcx.sess); let cgu_reuse = determine_cgu_reuse(tcx, &cgu); tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); @@ -900,7 +900,7 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; } - ongoing_codegen.codegen_finished(tcx); + backend.codegen_finished(&ongoing_codegen, tcx); // Since the main thread is sometimes blocked during codegen, we keep track // -Ztime-passes output manually. @@ -934,7 +934,7 @@ pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - ongoing_codegen.check_for_errors(tcx.sess); + backend.check_for_errors(&ongoing_codegen, tcx.sess); assert_and_save_dep_graph(tcx); ongoing_codegen @@ -1192,7 +1192,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn module_codegen<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cgu_name: InternedString) - -> (Stats, ModuleCodegen) + -> (Stats, ModuleCodegen) { let cgu = tcx.codegen_unit(cgu_name); @@ -1343,9 +1343,9 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { mod temp_stable_hash_impls { use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}; - use ModuleCodegen; + use {ModuleCodegen, ModuleLlvm}; - impl HashStable for ModuleCodegen { + impl HashStable for ModuleCodegen { fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) { diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index debce8560395e..227a955945f80 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -9,6 +9,14 @@ // except according to those terms. 
use super::CodegenObject; +use ModuleCodegen; +use rustc::session::Session; +use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::allocator::AllocatorKind; +use rustc::ty::TyCtxt; +use time_graph::TimeGraph; +use std::sync::mpsc::Receiver; +use std::any::Any; pub trait Backend<'ll> { type Value : 'll + CodegenObject; @@ -16,3 +24,36 @@ pub trait Backend<'ll> { type Type : CodegenObject; type Context; } + +pub trait BackendMethods { + type Metadata; + type OngoingCodegen; + + fn thin_lto_available(&self) -> bool; + fn pgo_available(&self) -> bool; + fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Metadata; + fn write_metadata<'a, 'gcx>( + &self, + tcx: TyCtxt<'a, 'gcx, 'gcx>, + metadata: &Self::Metadata + ) -> EncodedMetadata; + fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Metadata, kind: AllocatorKind); + + fn start_async_codegen( + &self, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize + ) -> Self::OngoingCodegen; + fn submit_pre_codegened_module_to_llvm( + &self, + codegen: &Self::OngoingCodegen, + tcx: TyCtxt, + module: ModuleCodegen + ); + fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt); + fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session); + fn wait_for_signal_to_codegen_item(&self, codegen: &Self::OngoingCodegen); +} diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 3477437f4c0ba..5bede58df6fde 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -21,7 +21,7 @@ mod declare; mod asm; pub use self::builder::{BuilderMethods, HasCodegen}; -pub use self::backend::Backend; +pub use self::backend::{Backend, BackendMethods}; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods}; diff --git 
a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index dce11ec2faa55..2d87d42bed6e0 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -68,6 +68,10 @@ extern crate tempfile; extern crate memmap; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use interfaces::*; +use time_graph::TimeGraph; +use std::sync::mpsc::Receiver; +use back::write::{self, OngoingCodegen}; pub use llvm_util::target_features; use std::any::Any; @@ -77,7 +81,8 @@ use rustc_data_structures::sync::Lrc; use rustc::dep_graph::DepGraph; use rustc::hir::def_id::CrateNum; -use rustc::middle::cstore::MetadataLoader; +use rustc::middle::allocator::AllocatorKind; +use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; use rustc::middle::lang_items::LangItem; use rustc::session::{Session, CompileIncomplete}; @@ -137,6 +142,59 @@ mod value; pub struct LlvmCodegenBackend(()); +impl BackendMethods for LlvmCodegenBackend { + type Metadata = ModuleLlvm; + type OngoingCodegen = OngoingCodegen; + + fn thin_lto_available(&self) -> bool { + unsafe { !llvm::LLVMRustThinLTOAvailable() } + } + fn pgo_available(&self) -> bool { + unsafe { !llvm::LLVMRustPGOAvailable() } + } + fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { + ModuleLlvm::new(sess, mod_name) + } + fn write_metadata<'a, 'gcx>( + &self, + tcx: TyCtxt<'a, 'gcx, 'gcx>, + metadata: &ModuleLlvm + ) -> EncodedMetadata { + base::write_metadata(tcx, metadata) + } + fn start_async_codegen( + &self, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize + ) -> OngoingCodegen { + write::start_async_codegen(tcx, time_graph, metadata, coordinator_receive, total_cgus) + } + fn submit_pre_codegened_module_to_llvm( + &self, + codegen : &OngoingCodegen, + tcx: TyCtxt, + module: ModuleCodegen + ) { + codegen.submit_pre_codegened_module_to_llvm(tcx, module) + 
} + fn codegen_finished(&self, codegen : &OngoingCodegen, tcx: TyCtxt) { + codegen.codegen_finished(tcx) + } + fn check_for_errors(&self, codegen: &OngoingCodegen, sess: &Session) { + codegen.check_for_errors(sess) + } + fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + unsafe { allocator::codegen(tcx, mods, kind) } + } + fn wait_for_signal_to_codegen_item(&self, codegen: &OngoingCodegen) { + codegen.wait_for_signal_to_codegen_item() + } +} + + impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis impl !Sync for LlvmCodegenBackend {} @@ -216,7 +274,7 @@ impl CodegenBackend for LlvmCodegenBackend { tcx: TyCtxt<'a, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { - box base::codegen_crate(tcx, rx) + box base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) } fn join_codegen_and_link( @@ -269,7 +327,7 @@ pub fn __rustc_codegen_backend() -> Box { LlvmCodegenBackend::new() } -struct ModuleCodegen { +pub struct ModuleCodegen { /// The name of the module. When the crate may be saved between /// compilations, incremental compilation requires that name be /// unique amongst **all** crates. Therefore, it should contain @@ -277,7 +335,7 @@ struct ModuleCodegen { /// as the crate name and disambiguator. /// We currently generate these names via CodegenUnit::build_cgu_name(). 
name: String, - module_llvm: ModuleLlvm, + module_llvm: M, kind: ModuleKind, } @@ -293,7 +351,7 @@ enum ModuleKind { Allocator, } -impl ModuleCodegen { +impl ModuleCodegen { fn into_compiled_module(self, emit_obj: bool, emit_bc: bool, @@ -335,7 +393,7 @@ struct CompiledModule { bytecode_compressed: Option, } -struct ModuleLlvm { +pub struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, From 440f2fbb5af5f5d8bca1dd168692ea7784957d54 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 26 Sep 2018 16:01:43 +0200 Subject: [PATCH 56/76] Renamed lifetimes for better understanding --- src/librustc_codegen_llvm/context.rs | 29 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 6e45f920a5917..56791ee376c04 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -46,14 +46,14 @@ use abi::Abi; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. 
-pub struct CodegenCx<'a, 'tcx: 'a, V> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub struct CodegenCx<'ll, 'tcx: 'll, V> { + pub tcx: TyCtxt<'ll, 'tcx, 'tcx>, pub check_overflow: bool, pub use_dll_storage_attrs: bool, pub tls_model: llvm::ThreadLocalMode, - pub llmod: &'a llvm::Module, - pub llcx: &'a llvm::Context, + pub llmod: &'ll llvm::Module, + pub llcx: &'ll llvm::Context, pub stats: RefCell, pub codegen_unit: Arc>, @@ -87,12 +87,12 @@ pub struct CodegenCx<'a, 'tcx: 'a, V> { /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details pub used_statics: RefCell>, - pub lltypes: RefCell, Option), &'a Type>>, - pub scalar_lltypes: RefCell, &'a Type>>, + pub lltypes: RefCell, Option), &'ll Type>>, + pub scalar_lltypes: RefCell, &'ll Type>>, pub pointee_infos: RefCell, Size), Option>>, - pub isize_ty: &'a Type, + pub isize_ty: &'ll Type, - pub dbg_cx: Option>, + pub dbg_cx: Option>, eh_personality: Cell>, eh_unwind_resume: Cell>, @@ -104,8 +104,7 @@ pub struct CodegenCx<'a, 'tcx: 'a, V> { local_gen_sym_counter: Cell, } -impl<'a, 'tcx, Value> DepGraphSafe for CodegenCx<'a, 'tcx, Value> { -} +impl<'ll, 'tcx, Value> DepGraphSafe for CodegenCx<'ll, 'tcx, Value> {} pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { @@ -218,11 +217,11 @@ pub unsafe fn create_module( llmod } -impl<'a, 'tcx, Value : Eq+Hash> CodegenCx<'a, 'tcx, Value> { - crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, +impl<'ll, 'tcx, Value : Eq+Hash> CodegenCx<'ll, 'tcx, Value> { + crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>, codegen_unit: Arc>, - llvm_module: &'a ::ModuleLlvm) - -> CodegenCx<'a, 'tcx, Value> { + llvm_module: &'ll ::ModuleLlvm) + -> CodegenCx<'ll, 'tcx, Value> { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these @@ -446,7 +445,7 @@ impl 
IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> { declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } - + fn declare_intrinsic( &self, key: &str From 0c1097477c5ba40d49a05b7f72a1dfadc740d99a Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 26 Sep 2018 17:00:01 +0200 Subject: [PATCH 57/76] Preparing the generalization of base:compile_coodegen_unit --- src/librustc_codegen_llvm/base.rs | 59 +++++++------------ src/librustc_codegen_llvm/consts.rs | 7 +++ src/librustc_codegen_llvm/context.rs | 31 ++++++++++ src/librustc_codegen_llvm/debuginfo/mod.rs | 4 ++ .../interfaces/backend.rs | 16 ++++- .../interfaces/debuginfo.rs | 3 +- src/librustc_codegen_llvm/interfaces/misc.rs | 4 ++ .../interfaces/statics.rs | 1 + src/librustc_codegen_llvm/lib.rs | 20 ++++++- src/librustc_codegen_llvm/mono_item.rs | 23 ++++---- 10 files changed, 112 insertions(+), 56 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 0f12fa27fceb4..b90e3394e91bc 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -27,6 +27,7 @@ use super::ModuleLlvm; use super::ModuleCodegen; use super::ModuleKind; use super::CachedModuleCodegen; +use super::LlvmCodegenBackend; use abi; use back::write; @@ -54,8 +55,6 @@ use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; use common::{self, IntPredicate, RealPredicate, TypeKind}; -use context::CodegenCx; -use debuginfo; use meth; use mir; use monomorphize::Instance; @@ -720,9 +719,9 @@ fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn codegen_crate<'a, 'tcx, B : BackendMethods>( +pub fn codegen_crate<'a, 'll: 'a, 'tcx: 'll, B : BackendMethods<'a, 'll, 'tcx>>( backend: B, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> B::OngoingCodegen { @@ -940,7 +939,7 @@ pub fn 
codegen_crate<'a, 'tcx, B : BackendMethods>( ongoing_codegen } -fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { +fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { time(tcx.sess, "assert dep graph", || rustc_incremental::assert_dep_graph(tcx)); @@ -950,8 +949,8 @@ fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { || rustc_incremental::save_dep_graph(tcx)); } -fn collect_and_partition_mono_items<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn collect_and_partition_mono_items<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, cnum: CrateNum, ) -> (Arc, Arc>>>) { @@ -1167,7 +1166,7 @@ fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { all_mono_items.contains(&id) } -fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -1189,26 +1188,26 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cost); return stats; - fn module_codegen<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn module_codegen<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> (Stats, ModuleCodegen) { + let backend = LlvmCodegenBackend(()); let cgu = tcx.codegen_unit(cgu_name); - // Instantiate monomorphizations without filling out definitions yet... - let llvm_module = ModuleLlvm::new(tcx.sess, &cgu_name.as_str()); + let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); let stats = { - let cx = CodegenCx::new(tcx, cgu, &llvm_module); + let cx = backend.new_codegen_context(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine(&cx, linkage, visibility); + mono_item.predefine::>(&cx, linkage, visibility); } // ... and now that we have everything pre-defined, fill out those definitions. 
for &(mono_item, _) in &mono_items { - mono_item.define(&cx); + mono_item.define::>(&cx); } // If this codegen unit contains the main function, also create the @@ -1216,40 +1215,22 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, maybe_create_entry_wrapper::>(&cx); // Run replace-all-uses-with for statics that need it - for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { - unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); - llvm::LLVMDeleteGlobal(old_g); - } + for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { + cx.static_replace_all_uses(old_g, new_g) } // Create the llvm.used variable // This variable has type [N x i8*] and is stored in the llvm.metadata section - if !cx.used_statics.borrow().is_empty() { - let name = const_cstr!("llvm.used"); - let section = const_cstr!("llvm.metadata"); - let array = cx.const_array( - &cx.type_ptr_to(cx.type_i8()), - &*cx.used_statics.borrow() - ); - - unsafe { - let g = llvm::LLVMAddGlobal(cx.llmod, - cx.val_ty(array), - name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } + if !cx.used_statics().borrow().is_empty() { + cx.create_used_variable() } // Finalize debuginfo if cx.sess().opts.debuginfo != DebugInfo::None { - debuginfo::finalize(&cx); + cx.debuginfo_finalize(); } - cx.stats.into_inner() + cx.consume_stats().into_inner() }; (stats, ModuleCodegen { diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index bd29e887cabca..bfa9e40f2190c 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -443,4 +443,11 @@ impl StaticMethods<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { } } } + fn static_replace_all_uses(&self, old_g: &'ll Value, new_g: &'ll Value) { + unsafe { + let bitcast = llvm::LLVMConstPointerCast(new_g, 
self.val_ty(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); + } + } } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 56791ee376c04..0de2193edfb98 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -422,10 +422,22 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { &self.stats } + fn consume_stats(self) -> RefCell { + self.stats + } + fn codegen_unit(&self) -> &Arc> { &self.codegen_unit } + fn statics_to_rauw(&self) -> &RefCell> { + &self.statics_to_rauw + } + + fn used_statics(&self) -> &RefCell> { + &self.used_statics + } + fn set_frame_pointer_elimination(&self, llfn: &'ll Value) { attributes::set_frame_pointer_elimination(self, llfn) } @@ -433,6 +445,25 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn apply_target_cpu_attr(&self, llfn: &'ll Value) { attributes::apply_target_cpu_attr(self, llfn) } + + + fn create_used_variable(&self) { + let name = const_cstr!("llvm.used"); + let section = const_cstr!("llvm.metadata"); + let array = self.const_array( + &self.type_ptr_to(self.type_i8()), + &*self.used_statics.borrow() + ); + + unsafe { + let g = llvm::LLVMAddGlobal(self.llmod, + self.val_ty(array), + name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); + } + } } impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 6e93039e273fa..096a22e61dde1 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -592,4 +592,8 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V ) -> &'ll DILexicalBlock { metadata::extend_scope_to_file(&self, scope_metadata, file, 
defining_crate) } + + fn debuginfo_finalize(&self) { + finalize(self) + } } diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index 227a955945f80..f4d1a0bf2a6e9 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -9,14 +9,17 @@ // except according to those terms. use super::CodegenObject; +use super::builder::{HasCodegen, BuilderMethods}; use ModuleCodegen; use rustc::session::Session; use rustc::middle::cstore::EncodedMetadata; use rustc::middle::allocator::AllocatorKind; +use monomorphize::partitioning::CodegenUnit; use rustc::ty::TyCtxt; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; use std::any::Any; +use std::sync::Arc; pub trait Backend<'ll> { type Value : 'll + CodegenObject; @@ -25,16 +28,17 @@ pub trait Backend<'ll> { type Context; } -pub trait BackendMethods { +pub trait BackendMethods<'a, 'll: 'a, 'tcx: 'll> { type Metadata; type OngoingCodegen; + type Builder : BuilderMethods<'a, 'll, 'tcx>; fn thin_lto_available(&self) -> bool; fn pgo_available(&self) -> bool; fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Metadata; - fn write_metadata<'a, 'gcx>( + fn write_metadata<'b, 'gcx>( &self, - tcx: TyCtxt<'a, 'gcx, 'gcx>, + tcx: TyCtxt<'b, 'gcx, 'gcx>, metadata: &Self::Metadata ) -> EncodedMetadata; fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Metadata, kind: AllocatorKind); @@ -56,4 +60,10 @@ pub trait BackendMethods { fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt); fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session); fn wait_for_signal_to_codegen_item(&self, codegen: &Self::OngoingCodegen); + fn new_codegen_context( + &self, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + codegen_unit: Arc>, + llvm_module: &'ll Self::Metadata + ) -> >::CodegenCx; } diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs 
index 36bcb0b67ec55..5b23a2f0baed9 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -41,7 +41,7 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { llfn: Self::Value, mir: &mir::Mir, ) -> FunctionDebugContext<'ll>; - + fn create_mir_scopes( &self, mir: &mir::Mir, @@ -53,6 +53,7 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { file: &syntax_pos::SourceFile, defining_crate: CrateNum, ) -> Self::DIScope; + fn debuginfo_finalize(&self); } pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index dceda584c2f17..6344d19464d97 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -29,7 +29,11 @@ pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn eh_unwind_resume(&self) -> Self::Value; fn sess(&self) -> &Session; fn stats(&self) -> &RefCell; + fn consume_stats(self) -> RefCell; fn codegen_unit(&self) -> &Arc>; + fn statics_to_rauw(&self) -> &RefCell>; + fn used_statics(&self) -> &RefCell>; fn set_frame_pointer_elimination(&self, llfn: Self::Value); fn apply_target_cpu_attr(&self, llfn: Self::Value); + fn create_used_variable(&self); } diff --git a/src/librustc_codegen_llvm/interfaces/statics.rs b/src/librustc_codegen_llvm/interfaces/statics.rs index 782bf67cb063a..125f86951161b 100644 --- a/src/librustc_codegen_llvm/interfaces/statics.rs +++ b/src/librustc_codegen_llvm/interfaces/statics.rs @@ -33,4 +33,5 @@ pub trait StaticMethods<'ll> : Backend<'ll> { def_id: DefId, is_mutable: bool, ); + fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value); } diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 2d87d42bed6e0..41142cd980e77 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -72,9 
+72,14 @@ use interfaces::*; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; use back::write::{self, OngoingCodegen}; +use builder::Builder; +use value::Value; +use context::CodegenCx; +use monomorphize::partitioning::CodegenUnit; pub use llvm_util::target_features; use std::any::Any; +use std::sync::Arc; use std::path::{PathBuf}; use std::sync::mpsc; use rustc_data_structures::sync::Lrc; @@ -142,9 +147,10 @@ mod value; pub struct LlvmCodegenBackend(()); -impl BackendMethods for LlvmCodegenBackend { +impl<'a, 'll: 'a, 'tcx: 'll> BackendMethods<'a, 'll, 'tcx> for LlvmCodegenBackend { type Metadata = ModuleLlvm; type OngoingCodegen = OngoingCodegen; + type Builder = Builder<'a, 'll, 'tcx, &'ll Value>; fn thin_lto_available(&self) -> bool { unsafe { !llvm::LLVMRustThinLTOAvailable() } @@ -155,9 +161,9 @@ impl BackendMethods for LlvmCodegenBackend { fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { ModuleLlvm::new(sess, mod_name) } - fn write_metadata<'a, 'gcx>( + fn write_metadata<'b, 'gcx>( &self, - tcx: TyCtxt<'a, 'gcx, 'gcx>, + tcx: TyCtxt<'b, 'gcx, 'gcx>, metadata: &ModuleLlvm ) -> EncodedMetadata { base::write_metadata(tcx, metadata) @@ -192,6 +198,14 @@ impl BackendMethods for LlvmCodegenBackend { fn wait_for_signal_to_codegen_item(&self, codegen: &OngoingCodegen) { codegen.wait_for_signal_to_codegen_item() } + fn new_codegen_context( + &self, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + codegen_unit: Arc>, + llvm_module: &'ll ModuleLlvm + ) -> CodegenCx<'ll, 'tcx, &'ll Value> { + CodegenCx::new(tcx, codegen_unit, llvm_module) + } } diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index 434191e338af5..09c1bb419678a 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -28,18 +28,17 @@ use rustc::ty::{TypeFoldable, Ty}; use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use std::fmt; use value::Value; -use builder::Builder; use interfaces::*; pub use 
rustc::mir::mono::MonoItem; pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; -pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> : - fmt::Debug + BaseMonoItemExt<'ll, 'tcx> where - &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll> : fmt::Debug + BaseMonoItemExt<'ll, 'tcx> { - fn define(&self, cx: &'a Bx::CodegenCx) { + fn define>(&self, cx: &'a Bx::CodegenCx) where + &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", self.to_string(*cx.tcx()), self.to_raw_string(), @@ -78,10 +77,14 @@ pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> cx.codegen_unit().name()); } - fn predefine(&self, - cx: &'a Bx::CodegenCx, - linkage: Linkage, - visibility: Visibility) { + fn predefine>( + &self, + cx: &'a Bx::CodegenCx, + linkage: Linkage, + visibility: Visibility + ) where + &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", self.to_string(*cx.tcx()), self.to_raw_string(), @@ -124,7 +127,7 @@ pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> } } -impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx, Builder<'a, 'll, 'tcx, &'ll Value>> +impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx> for MonoItem<'tcx> {} impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { From 3f6f7539d3e4bf688cef9b904508c32b29356d86 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 27 Sep 2018 15:31:20 +0200 Subject: [PATCH 58/76] Added compile codegen to backend trait --- src/librustc_codegen_llvm/base.rs | 9 +++++---- .../interfaces/backend.rs | 15 ++++++--------- src/librustc_codegen_llvm/lib.rs | 19 +++++++------------ 3 files changed, 18 insertions(+), 25 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs 
b/src/librustc_codegen_llvm/base.rs index b90e3394e91bc..7b9f826a1535b 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -57,6 +57,7 @@ use rustc_mir::monomorphize::item::DefPathBasedNames; use common::{self, IntPredicate, RealPredicate, TypeKind}; use meth; use mir; +use context::CodegenCx; use monomorphize::Instance; use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; use rustc_codegen_utils::symbol_names_test; @@ -719,7 +720,7 @@ fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn codegen_crate<'a, 'll: 'a, 'tcx: 'll, B : BackendMethods<'a, 'll, 'tcx>>( +pub fn codegen_crate( backend: B, tcx: TyCtxt<'ll, 'tcx, 'tcx>, rx: mpsc::Receiver> @@ -877,7 +878,7 @@ pub fn codegen_crate<'a, 'll: 'a, 'tcx: 'll, B : BackendMethods<'a, 'll, 'tcx>>( &format!("codegen {}", cgu.name())) }); let start_time = Instant::now(); - let stats = compile_codegen_unit(tcx, *cgu.name()); + let stats = backend.compile_codegen_unit(tcx, *cgu.name()); all_stats.extend(stats); total_codegen_time += start_time.elapsed(); false @@ -1166,7 +1167,7 @@ fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { all_mono_items.contains(&id) } -fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, +pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -1198,7 +1199,7 @@ fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, // Instantiate monomorphizations without filling out definitions yet... 
let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); let stats = { - let cx = backend.new_codegen_context(tcx, cgu, &llvm_module); + let cx = CodegenCx::new(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_llvm/interfaces/backend.rs index f4d1a0bf2a6e9..bc79d946f56f4 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_llvm/interfaces/backend.rs @@ -9,17 +9,16 @@ // except according to those terms. use super::CodegenObject; -use super::builder::{HasCodegen, BuilderMethods}; use ModuleCodegen; use rustc::session::Session; use rustc::middle::cstore::EncodedMetadata; use rustc::middle::allocator::AllocatorKind; -use monomorphize::partitioning::CodegenUnit; use rustc::ty::TyCtxt; +use rustc::mir::mono::Stats; +use syntax_pos::symbol::InternedString; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; use std::any::Any; -use std::sync::Arc; pub trait Backend<'ll> { type Value : 'll + CodegenObject; @@ -28,10 +27,9 @@ pub trait Backend<'ll> { type Context; } -pub trait BackendMethods<'a, 'll: 'a, 'tcx: 'll> { +pub trait BackendMethods { type Metadata; type OngoingCodegen; - type Builder : BuilderMethods<'a, 'll, 'tcx>; fn thin_lto_available(&self) -> bool; fn pgo_available(&self) -> bool; @@ -60,10 +58,9 @@ pub trait BackendMethods<'a, 'll: 'a, 'tcx: 'll> { fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt); fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session); fn wait_for_signal_to_codegen_item(&self, codegen: &Self::OngoingCodegen); - fn new_codegen_context( + fn compile_codegen_unit<'ll, 'tcx: 'll>( &self, tcx: TyCtxt<'ll, 'tcx, 'tcx>, - codegen_unit: Arc>, - llvm_module: &'ll Self::Metadata - ) -> >::CodegenCx; + cgu_name: InternedString + ) -> Stats ; } diff --git 
a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 41142cd980e77..016cac4448392 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -72,14 +72,11 @@ use interfaces::*; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; use back::write::{self, OngoingCodegen}; -use builder::Builder; -use value::Value; -use context::CodegenCx; -use monomorphize::partitioning::CodegenUnit; +use syntax_pos::symbol::InternedString; +use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; -use std::sync::Arc; use std::path::{PathBuf}; use std::sync::mpsc; use rustc_data_structures::sync::Lrc; @@ -147,10 +144,9 @@ mod value; pub struct LlvmCodegenBackend(()); -impl<'a, 'll: 'a, 'tcx: 'll> BackendMethods<'a, 'll, 'tcx> for LlvmCodegenBackend { +impl BackendMethods for LlvmCodegenBackend { type Metadata = ModuleLlvm; type OngoingCodegen = OngoingCodegen; - type Builder = Builder<'a, 'll, 'tcx, &'ll Value>; fn thin_lto_available(&self) -> bool { unsafe { !llvm::LLVMRustThinLTOAvailable() } @@ -198,13 +194,12 @@ impl<'a, 'll: 'a, 'tcx: 'll> BackendMethods<'a, 'll, 'tcx> for LlvmCodegenBacken fn wait_for_signal_to_codegen_item(&self, codegen: &OngoingCodegen) { codegen.wait_for_signal_to_codegen_item() } - fn new_codegen_context( + fn compile_codegen_unit<'ll, 'tcx: 'll>( &self, tcx: TyCtxt<'ll, 'tcx, 'tcx>, - codegen_unit: Arc>, - llvm_module: &'ll ModuleLlvm - ) -> CodegenCx<'ll, 'tcx, &'ll Value> { - CodegenCx::new(tcx, codegen_unit, llvm_module) + cgu_name: InternedString + ) -> Stats { + base::compile_codegen_unit(tcx, cgu_name) } } From 570d3c001b528a4e3a88ba3016af9584a710ae0d Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 28 Sep 2018 11:40:59 +0200 Subject: [PATCH 59/76] Starting to move backend-agnostic code into codegen_utils IntPredicate moved --- src/librustc_codegen_llvm/base.rs | 3 ++- src/librustc_codegen_llvm/builder.rs | 1 + src/librustc_codegen_llvm/common.rs | 13 
----------- src/librustc_codegen_llvm/glue.rs | 2 +- .../interfaces/builder.rs | 1 + src/librustc_codegen_llvm/llvm/ffi.rs | 23 ++++++++++--------- src/librustc_codegen_llvm/mir/block.rs | 3 ++- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 3 ++- src/librustc_codegen_utils/common.rs | 22 ++++++++++++++++++ src/librustc_codegen_utils/lib.rs | 1 + 11 files changed, 45 insertions(+), 29 deletions(-) create mode 100644 src/librustc_codegen_utils/common.rs diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 7b9f826a1535b..6c2377067bb2c 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -54,7 +54,8 @@ use builder::{Builder, MemFlags}; use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, IntPredicate, RealPredicate, TypeKind}; +use common::{self, RealPredicate, TypeKind}; +use rustc_codegen_utils::common::IntPredicate; use meth; use mir; use context::CodegenCx; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 19eb5f8caf85f..2ad23428c7550 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -11,6 +11,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; use common::{self, *}; +use rustc_codegen_utils::common::IntPredicate; use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 45cc272753d76..a01af50615a65 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -67,19 +67,6 @@ impl OperandBundleDef<'ll, V> { } } -pub enum IntPredicate { - IntEQ, - IntNE, - IntUGT, - IntUGE, - IntULT, - IntULE, - IntSGT, - IntSGE, - IntSLT, - IntSLE -} - 
#[allow(dead_code)] pub enum RealPredicate { RealPredicateFalse, diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index 49db1c7694861..f0868962af074 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -14,7 +14,7 @@ use std; -use common::*; +use rustc_codegen_utils::common::IntPredicate; use meth; use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index b5c6b453b401c..5a06bb6aeda4f 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -9,6 +9,7 @@ // except according to those terms. use common::*; +use rustc_codegen_utils::common::IntPredicate; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 87bbf3f7ab555..883941b89ab21 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -20,6 +20,7 @@ use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; use common; +use rustc_codegen_utils; use syntax; use super::RustString; @@ -144,18 +145,18 @@ pub enum IntPredicate { } impl IntPredicate { - pub fn from_generic(intpre: common::IntPredicate) -> Self { + pub fn from_generic(intpre: rustc_codegen_utils::common::IntPredicate) -> Self { match intpre { - common::IntPredicate::IntEQ => IntPredicate::IntEQ, - common::IntPredicate::IntNE => IntPredicate::IntNE, - common::IntPredicate::IntUGT => IntPredicate::IntUGT, - common::IntPredicate::IntUGE => IntPredicate::IntUGE, - common::IntPredicate::IntULT => IntPredicate::IntULT, - common::IntPredicate::IntULE => IntPredicate::IntULE, - common::IntPredicate::IntSGT => IntPredicate::IntSGT, - common::IntPredicate::IntSGE => IntPredicate::IntSGE, - common::IntPredicate::IntSLT => 
IntPredicate::IntSLT, - common::IntPredicate::IntSLE => IntPredicate::IntSLE, + rustc_codegen_utils::common::IntPredicate::IntEQ => IntPredicate::IntEQ, + rustc_codegen_utils::common::IntPredicate::IntNE => IntPredicate::IntNE, + rustc_codegen_utils::common::IntPredicate::IntUGT => IntPredicate::IntUGT, + rustc_codegen_utils::common::IntPredicate::IntUGE => IntPredicate::IntUGE, + rustc_codegen_utils::common::IntPredicate::IntULT => IntPredicate::IntULT, + rustc_codegen_utils::common::IntPredicate::IntULE => IntPredicate::IntULE, + rustc_codegen_utils::common::IntPredicate::IntSGT => IntPredicate::IntSGT, + rustc_codegen_utils::common::IntPredicate::IntSGE => IntPredicate::IntSGE, + rustc_codegen_utils::common::IntPredicate::IntSLT => IntPredicate::IntSLT, + rustc_codegen_utils::common::IntPredicate::IntSLE => IntPredicate::IntSLE, } } } diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index 5c3f21f807e87..f95806ea2b878 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -17,7 +17,8 @@ use abi::{Abi, FnType, PassMode}; use rustc_target::abi::call::ArgType; use base; use builder::MemFlags; -use common::{self, IntPredicate}; +use common; +use rustc_codegen_utils::common::IntPredicate; use meth; use monomorphize; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index a2a578585da60..39eb45b7d8ee3 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -12,7 +12,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use common::IntPredicate; +use rustc_codegen_utils::common::IntPredicate; use type_of::LayoutLlvmExt; use glue; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index 939b369bb7b5f..dde3f94c3fa1c 100644 --- 
a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -18,7 +18,8 @@ use std::{u128, i128}; use base; use callee; -use common::{self, IntPredicate, RealPredicate}; +use common::{self, RealPredicate}; +use rustc_codegen_utils::common::IntPredicate; use monomorphize; use type_of::LayoutLlvmExt; diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_utils/common.rs new file mode 100644 index 0000000000000..b5640cfab3881 --- /dev/null +++ b/src/librustc_codegen_utils/common.rs @@ -0,0 +1,22 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE +} diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index 03b3b20a4e772..eee6723dead29 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -46,6 +46,7 @@ pub mod link; pub mod codegen_backend; pub mod symbol_names; pub mod symbol_names_test; +pub mod common; /// check for the #[rustc_error] annotation, which forces an /// error in codegen. 
This is used to write compile-fail tests From 0da517049a6579b1c5b5d9963c297faf869b9804 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 28 Sep 2018 12:18:03 +0200 Subject: [PATCH 60/76] Moved common.rs enums --- src/librustc_codegen_llvm/base.rs | 4 +- src/librustc_codegen_llvm/builder.rs | 23 ++-- src/librustc_codegen_llvm/common.rs | 76 +---------- .../interfaces/builder.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 2 +- src/librustc_codegen_llvm/intrinsic.rs | 6 +- src/librustc_codegen_llvm/llvm/ffi.rs | 121 +++++++++--------- src/librustc_codegen_llvm/mir/rvalue.rs | 4 +- src/librustc_codegen_llvm/type_.rs | 5 +- src/librustc_codegen_utils/common.rs | 78 +++++++++++ 10 files changed, 166 insertions(+), 155 deletions(-) diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 6c2377067bb2c..1a913b700aa0d 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -54,8 +54,8 @@ use builder::{Builder, MemFlags}; use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, RealPredicate, TypeKind}; -use rustc_codegen_utils::common::IntPredicate; +use common; +use rustc_codegen_utils::common::{RealPredicate, TypeKind, IntPredicate}; use meth; use mir; use context::CodegenCx; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 2ad23428c7550..87311151930b7 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -10,8 +10,9 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; -use common::{self, *}; -use rustc_codegen_utils::common::IntPredicate; +use common; +use rustc_codegen_utils::common::{IntPredicate, TypeKind, RealPredicate}; +use rustc_codegen_utils; use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; @@ 
-514,7 +515,7 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_load( &self, ptr: &'ll Value, - order: common::AtomicOrdering, + order: rustc_codegen_utils::common::AtomicOrdering, align: Align ) -> &'ll Value { self.count_insn("load.atomic"); @@ -636,7 +637,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: common::AtomicOrdering, align: Align) { + order: rustc_codegen_utils::common::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); @@ -1168,8 +1169,8 @@ impl BuilderMethods<'a, 'll, 'tcx> dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: common::AtomicOrdering, - failure_order: common::AtomicOrdering, + order: rustc_codegen_utils::common::AtomicOrdering, + failure_order: rustc_codegen_utils::common::AtomicOrdering, weak: bool, ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; @@ -1187,10 +1188,10 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_rmw( &self, - op: common::AtomicRmwBinOp, + op: rustc_codegen_utils::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: common::AtomicOrdering, + order: rustc_codegen_utils::common::AtomicOrdering, ) -> &'ll Value { unsafe { llvm::LLVMBuildAtomicRMW( @@ -1203,7 +1204,11 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn atomic_fence(&self, order: common::AtomicOrdering, scope: common::SynchronizationScope) { + fn atomic_fence( + &self, + order: rustc_codegen_utils::common::AtomicOrdering, + scope: rustc_codegen_utils::common::SynchronizationScope + ) { unsafe { llvm::LLVMRustBuildAtomicFence( self.llbuilder, diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index a01af50615a65..f56b2da9fe523 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -30,6 +30,7 @@ use rustc::hir; use interfaces::BuilderMethods; use mir::constant::const_alloc_to_llvm; use 
mir::place::PlaceRef; +use rustc_codegen_utils::common::TypeKind; use libc::{c_uint, c_char}; use std::iter; @@ -67,81 +68,6 @@ impl OperandBundleDef<'ll, V> { } } -#[allow(dead_code)] -pub enum RealPredicate { - RealPredicateFalse, - RealOEQ, - RealOGT, - RealOGE, - RealOLT, - RealOLE, - RealONE, - RealORD, - RealUNO, - RealUEQ, - RealUGT, - RealUGE, - RealULT, - RealULE, - RealUNE, - RealPredicateTrue -} - -pub enum AtomicRmwBinOp { - AtomicXchg, - AtomicAdd, - AtomicSub, - AtomicAnd, - AtomicNand, - AtomicOr, - AtomicXor, - AtomicMax, - AtomicMin, - AtomicUMax, - AtomicUMin -} - -pub enum AtomicOrdering { - #[allow(dead_code)] - NotAtomic, - Unordered, - Monotonic, - // Consume, // Not specified yet. - Acquire, - Release, - AcquireRelease, - SequentiallyConsistent, -} - -pub enum SynchronizationScope { - // FIXME: figure out if this variant is needed at all. - #[allow(dead_code)] - Other, - SingleThread, - CrossThread, -} - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum TypeKind { - Void, - Half, - Float, - Double, - X86_FP80, - FP128, - PPc_FP128, - Label, - Integer, - Function, - Struct, - Array, - Pointer, - Vector, - Metadata, - X86_MMX, - Token, -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 5a06bb6aeda4f..de2f835456a37 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use common::*; -use rustc_codegen_utils::common::IntPredicate; +use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp}; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 266c14a4af22b..517f1af634d96 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -10,7 +10,7 @@ use super::backend::Backend; use super::builder::HasCodegen; -use common::TypeKind; +use rustc_codegen_utils::common::TypeKind; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use std::cell::RefCell; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 3e875fd418d7c..c53d277e39633 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -17,13 +17,13 @@ use abi::{Abi, FnType, LlvmType, PassMode}; use mir::place::PlaceRef; use mir::operand::{OperandRef, OperandValue}; use base::*; -use common::*; use context::CodegenCx; use glue; use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc_codegen_utils::common::TypeKind; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -424,7 +424,9 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst name if name.starts_with("atomic_") => { - use self::AtomicOrdering::*; + use rustc_codegen_utils::common::AtomicOrdering::*; + use rustc_codegen_utils::common:: + {SynchronizationScope, AtomicRmwBinOp}; let split: Vec<&str> = name.split('_').collect(); diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 883941b89ab21..bbc2c95e1f2f4 
100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,7 +19,6 @@ use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; -use common; use rustc_codegen_utils; use syntax; @@ -184,24 +183,24 @@ pub enum RealPredicate { } impl RealPredicate { - pub fn from_generic(realpred: common::RealPredicate) -> Self { + pub fn from_generic(realpred: rustc_codegen_utils::common::RealPredicate) -> Self { match realpred { - common::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, - common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, - common::RealPredicate::RealOGT => RealPredicate::RealOGT, - common::RealPredicate::RealOGE => RealPredicate::RealOGE, - common::RealPredicate::RealOLT => RealPredicate::RealOLT, - common::RealPredicate::RealOLE => RealPredicate::RealOLE, - common::RealPredicate::RealONE => RealPredicate::RealONE, - common::RealPredicate::RealORD => RealPredicate::RealORD, - common::RealPredicate::RealUNO => RealPredicate::RealUNO, - common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, - common::RealPredicate::RealUGT => RealPredicate::RealUGT, - common::RealPredicate::RealUGE => RealPredicate::RealUGE, - common::RealPredicate::RealULT => RealPredicate::RealULT, - common::RealPredicate::RealULE => RealPredicate::RealULE, - common::RealPredicate::RealUNE => RealPredicate::RealUNE, - common::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue + rustc_codegen_utils::common::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, + rustc_codegen_utils::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + rustc_codegen_utils::common::RealPredicate::RealOGT => RealPredicate::RealOGT, + rustc_codegen_utils::common::RealPredicate::RealOGE => RealPredicate::RealOGE, + rustc_codegen_utils::common::RealPredicate::RealOLT => RealPredicate::RealOLT, + rustc_codegen_utils::common::RealPredicate::RealOLE => 
RealPredicate::RealOLE, + rustc_codegen_utils::common::RealPredicate::RealONE => RealPredicate::RealONE, + rustc_codegen_utils::common::RealPredicate::RealORD => RealPredicate::RealORD, + rustc_codegen_utils::common::RealPredicate::RealUNO => RealPredicate::RealUNO, + rustc_codegen_utils::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + rustc_codegen_utils::common::RealPredicate::RealUGT => RealPredicate::RealUGT, + rustc_codegen_utils::common::RealPredicate::RealUGE => RealPredicate::RealUGE, + rustc_codegen_utils::common::RealPredicate::RealULT => RealPredicate::RealULT, + rustc_codegen_utils::common::RealPredicate::RealULE => RealPredicate::RealULE, + rustc_codegen_utils::common::RealPredicate::RealUNE => RealPredicate::RealUNE, + rustc_codegen_utils::common::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue } } } @@ -216,7 +215,7 @@ pub enum TypeKind { Double = 3, X86_FP80 = 4, FP128 = 5, - PPc_FP128 = 6, + PPC_FP128 = 6, Label = 7, Integer = 8, Function = 9, @@ -230,25 +229,25 @@ pub enum TypeKind { } impl TypeKind { - pub fn to_generic(self) -> common::TypeKind { + pub fn to_generic(self) -> rustc_codegen_utils::common::TypeKind { match self { - TypeKind::Void => common::TypeKind::Void, - TypeKind::Half => common::TypeKind::Half, - TypeKind::Float => common::TypeKind::Float, - TypeKind::Double => common::TypeKind::Double, - TypeKind::X86_FP80 => common::TypeKind::X86_FP80, - TypeKind::FP128 => common::TypeKind::FP128, - TypeKind::PPc_FP128 => common::TypeKind::PPc_FP128, - TypeKind::Label => common::TypeKind::Label, - TypeKind::Integer => common::TypeKind::Integer, - TypeKind::Function => common::TypeKind::Function, - TypeKind::Struct => common::TypeKind::Struct, - TypeKind::Array => common::TypeKind::Array, - TypeKind::Pointer => common::TypeKind::Pointer, - TypeKind::Vector => common::TypeKind::Vector, - TypeKind::Metadata => common::TypeKind::Metadata, - TypeKind::X86_MMX => common::TypeKind::X86_MMX, - TypeKind::Token => 
common::TypeKind::Token, + TypeKind::Void => rustc_codegen_utils::common::TypeKind::Void, + TypeKind::Half => rustc_codegen_utils::common::TypeKind::Half, + TypeKind::Float => rustc_codegen_utils::common::TypeKind::Float, + TypeKind::Double => rustc_codegen_utils::common::TypeKind::Double, + TypeKind::X86_FP80 => rustc_codegen_utils::common::TypeKind::X86_FP80, + TypeKind::FP128 => rustc_codegen_utils::common::TypeKind::FP128, + TypeKind::PPC_FP128 => rustc_codegen_utils::common::TypeKind::PPC_FP128, + TypeKind::Label => rustc_codegen_utils::common::TypeKind::Label, + TypeKind::Integer => rustc_codegen_utils::common::TypeKind::Integer, + TypeKind::Function => rustc_codegen_utils::common::TypeKind::Function, + TypeKind::Struct => rustc_codegen_utils::common::TypeKind::Struct, + TypeKind::Array => rustc_codegen_utils::common::TypeKind::Array, + TypeKind::Pointer => rustc_codegen_utils::common::TypeKind::Pointer, + TypeKind::Vector => rustc_codegen_utils::common::TypeKind::Vector, + TypeKind::Metadata => rustc_codegen_utils::common::TypeKind::Metadata, + TypeKind::X86_MMX => rustc_codegen_utils::common::TypeKind::X86_MMX, + TypeKind::Token => rustc_codegen_utils::common::TypeKind::Token, } } } @@ -271,19 +270,19 @@ pub enum AtomicRmwBinOp { } impl AtomicRmwBinOp { - pub fn from_generic(op : common::AtomicRmwBinOp) -> Self { + pub fn from_generic(op : rustc_codegen_utils::common::AtomicRmwBinOp) -> Self { match op { - common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, - common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, - common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, - common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, - common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, - common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, - common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, - common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, - 
common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, - common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, - common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + rustc_codegen_utils::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin } } } @@ -304,15 +303,15 @@ pub enum AtomicOrdering { } impl AtomicOrdering { - pub fn from_generic(ao : common::AtomicOrdering) -> Self { + pub fn from_generic(ao : rustc_codegen_utils::common::AtomicOrdering) -> Self { match ao { - common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, - common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, - common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, - common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, - common::AtomicOrdering::Release => AtomicOrdering::Release, - common::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, - common::AtomicOrdering::SequentiallyConsistent => + rustc_codegen_utils::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + rustc_codegen_utils::common::AtomicOrdering::Unordered => 
AtomicOrdering::Unordered, + rustc_codegen_utils::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + rustc_codegen_utils::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + rustc_codegen_utils::common::AtomicOrdering::Release => AtomicOrdering::Release, + rustc_codegen_utils::common::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, + rustc_codegen_utils::common::AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent } } @@ -331,11 +330,11 @@ pub enum SynchronizationScope { } impl SynchronizationScope { - pub fn from_generic(sc : common::SynchronizationScope) -> Self { + pub fn from_generic(sc : rustc_codegen_utils::common::SynchronizationScope) -> Self { match sc { - common::SynchronizationScope::Other => SynchronizationScope::Other, - common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, - common::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, + rustc_codegen_utils::common::SynchronizationScope::Other => SynchronizationScope::Other, + rustc_codegen_utils::common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, + rustc_codegen_utils::common::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, } } } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index dde3f94c3fa1c..dd64586d564cb 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -18,8 +18,8 @@ use std::{u128, i128}; use base; use callee; -use common::{self, RealPredicate}; -use rustc_codegen_utils::common::IntPredicate; +use common; +use rustc_codegen_utils::common::{RealPredicate, IntPredicate}; use monomorphize; use type_of::LayoutLlvmExt; diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 0d6ba8953172d..09b05c92c6454 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ 
-26,7 +26,8 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::TyLayout; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; -use common::{self, TypeKind}; +use common; +use rustc_codegen_utils::common::TypeKind; use type_of::LayoutLlvmExt; use abi::{LlvmType, FnTypeExt}; @@ -225,7 +226,7 @@ impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, - TypeKind::FP128 | TypeKind::PPc_FP128 => 128, + TypeKind::FP128 | TypeKind::PPC_FP128 => 128, _ => bug!("llvm_float_width called on a non-float type") } } diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_utils/common.rs index b5640cfab3881..5dc138b31ff04 100644 --- a/src/librustc_codegen_utils/common.rs +++ b/src/librustc_codegen_utils/common.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(non_camel_case_types, non_snake_case)] + pub enum IntPredicate { IntEQ, IntNE, @@ -20,3 +22,79 @@ pub enum IntPredicate { IntSLT, IntSLE } + + +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin +} + +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. 
+ #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TypeKind { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPC_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, +} From a03673c6da13954ad3218d31261f9303bb1c9969 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 28 Sep 2018 14:24:17 +0200 Subject: [PATCH 61/76] Moved OperandBundleDef --- src/librustc_codegen_llvm/builder.rs | 23 ++++++++------ src/librustc_codegen_llvm/common.rs | 30 ++---------------- .../interfaces/builder.rs | 3 +- src/librustc_codegen_llvm/llvm/mod.rs | 6 ++-- src/librustc_codegen_llvm/type_.rs | 7 +++-- src/librustc_codegen_utils/common.rs | 31 ++++++++++++++++++- 6 files changed, 55 insertions(+), 45 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 87311151930b7..61009a58f2795 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -10,7 +10,6 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; -use common; use rustc_codegen_utils::common::{IntPredicate, TypeKind, RealPredicate}; use rustc_codegen_utils; use context::CodegenCx; @@ -193,12 +192,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn invoke(&self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + fn invoke( + &self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + bundle: Option<&rustc_codegen_utils::common::OperandBundleDef<'ll, &'ll Value>> + ) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -1323,8 +1324,12 @@ impl BuilderMethods<'a, 'll, 'tcx> self.call(lifetime_intrinsic, &[self.cx.const_u64(size), 
ptr], None); } - fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value { + fn call( + &self, + llfn: &'ll Value, + args: &[&'ll Value], + bundle: Option<&rustc_codegen_utils::common::OperandBundleDef<'ll, &'ll Value>> + ) -> &'ll Value { self.count_insn("call"); debug!("Call {:?} with args ({:?})", diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index f56b2da9fe523..2af78940685c9 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -30,7 +30,7 @@ use rustc::hir; use interfaces::BuilderMethods; use mir::constant::const_alloc_to_llvm; use mir::place::PlaceRef; -use rustc_codegen_utils::common::TypeKind; +use rustc_codegen_utils::common::{OperandBundleDef, TypeKind}; use libc::{c_uint, c_char}; use std::iter; @@ -38,36 +38,10 @@ use std::iter; use rustc_target::spec::abi::Abi; use syntax::symbol::LocalInternedString; use syntax::ast::Mutability; -use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::Span; pub use context::CodegenCx; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - -pub struct OperandBundleDef<'a, V> { - pub name: &'a str, - pub val: V -} - -impl OperandBundleDef<'ll, V> { - pub fn new(name: &'ll str, val: V) -> Self { - OperandBundleDef { - name, - val - } - } -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". 
* diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index de2f835456a37..5eacc7e1178a4 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use common::*; -use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp}; +use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index 63a74cb6d6d50..b9f0fca05f90a 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -28,7 +28,7 @@ use std::ffi::CStr; use std::cell::RefCell; use libc::{self, c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; -use common; +use rustc_codegen_utils; pub mod archive_ro; pub mod diagnostic; @@ -273,7 +273,9 @@ impl OperandBundleDef<'a> { OperandBundleDef { raw: def } } - pub fn from_generic(bundle : &common::OperandBundleDef<'a, &'a Value>) -> Self { + pub fn from_generic( + bundle : &rustc_codegen_utils::common::OperandBundleDef<'a, &'a Value> + ) -> Self { Self::new(bundle.name, &[bundle.val]) } } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 09b05c92c6454..19e2414dc308c 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -27,6 +27,7 @@ use rustc::ty::layout::TyLayout; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; use common; +use rustc_codegen_utils; use rustc_codegen_utils::common::TypeKind; use type_of::LayoutLlvmExt; use abi::{LlvmType, 
FnTypeExt}; @@ -373,15 +374,15 @@ impl DerivedTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - common::type_needs_drop(*self.tcx(), ty) + rustc_codegen_utils::common::type_needs_drop(*self.tcx(), ty) } fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - common::type_is_sized(*self.tcx(), ty) + rustc_codegen_utils::common::type_is_sized(*self.tcx(), ty) } fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - common::type_is_freeze(*self.tcx(), ty) + rustc_codegen_utils::common::type_is_freeze(*self.tcx(), ty) } fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_utils/common.rs index 5dc138b31ff04..f962d98a2a958 100644 --- a/src/librustc_codegen_utils/common.rs +++ b/src/librustc_codegen_utils/common.rs @@ -7,9 +7,38 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. - #![allow(non_camel_case_types, non_snake_case)] +use rustc::ty::{self, Ty, TyCtxt}; +use syntax_pos::DUMMY_SP; + + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) +} + +pub struct OperandBundleDef<'a, V> { + pub name: &'a str, + pub val: V +} + +impl<'a, V> OperandBundleDef<'a, V> { + pub fn new(name: &'a str, val: V) -> Self { + OperandBundleDef { + name, + val + } + } +} + pub enum IntPredicate { IntEQ, IntNE, From 4a2f7c7cd95595d42cd6dc25614a368913bd5941 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 28 Sep 2018 14:58:43 +0200 Subject: [PATCH 62/76] Adapt to latest rustc master changes --- 
src/librustc_codegen_llvm/asm.rs | 14 +++++++------- src/librustc_codegen_llvm/interfaces/asm.rs | 2 +- src/librustc_codegen_llvm/interfaces/builder.rs | 2 +- src/librustc_codegen_llvm/mir/statement.rs | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 914391226e2ce..90a4363602fad 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -57,7 +57,7 @@ impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match &bx.sess().target.target.arch[..] { + let arch_clobbers = match &self.cx().sess().target.target.arch[..] { "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], "mips" | "mips64" => vec!["~{$1}"], _ => Vec::new() @@ -76,14 +76,14 @@ impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => bx.cx().type_void(), + 0 => self.cx().type_void(), 1 => output_types[0], - _ => bx.cx().type_struct(&output_types, false) + _ => self.cx().type_struct(&output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = bx.inline_asm_call( + let r = self.inline_asm_call( asm.as_ptr(), constraint_cstr.as_ptr(), &inputs, @@ -100,8 +100,8 @@ impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &place)) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) }; - OperandValue::Immediate(v).store(bx, place); + let v = if num_outputs == 1 { r } else { self.extract_value(r, i as 
u64) }; + OperandValue::Immediate(v).store(self, place); } // Store mark in a metadata node so we can map LLVM errors @@ -116,9 +116,9 @@ impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1)); } - } return true; + } } impl AsmMethods for CodegenCx<'ll, 'tcx, &'ll Value> { diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs index 77b1b133f83af..6dfd43a7467d0 100644 --- a/src/librustc_codegen_llvm/interfaces/asm.rs +++ b/src/librustc_codegen_llvm/interfaces/asm.rs @@ -20,7 +20,7 @@ pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ ia: &InlineAsm, outputs: Vec>::Value>>, inputs: Vec<>::Value> - ); + ) -> bool; } pub trait AsmMethods { diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 5eacc7e1178a4..c9681f8dd6c84 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -386,7 +386,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + volatile: bool, alignstack: bool, dia: AsmDialect - ) -> >::Value; + ) -> Option<>::Value>; fn minnum( &self, diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs index 1e6e784d33b6c..623962c20ad88 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -100,13 +100,13 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> }); if input_vals.is_err() { - span_err!(bx.cx.sess(), statement.source_info.span, E0669, + span_err!(bx.cx().sess(), statement.source_info.span, E0669, "invalid value for constraint in inline assembly"); } else { let input_vals = input_vals.unwrap(); - let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals); + let res = xb.codegen_inline_asm(asm, outputs, 
input_vals); if !res { - span_err!(bx.cx.sess(), statement.source_info.span, E0668, + span_err!(bx.cx().sess(), statement.source_info.span, E0668, "malformed inline assembly"); } } From b8e7061e6231685997791c2c2bae202d506554ea Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 28 Sep 2018 17:16:36 +0200 Subject: [PATCH 63/76] Moved Funclet --- src/librustc_codegen_llvm/common.rs | 38 +------------------ src/librustc_codegen_llvm/interfaces/mod.rs | 5 +-- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_utils/common.rs | 39 ++++++++++++++++++++ src/librustc_codegen_utils/interfaces/mod.rs | 13 +++++++ src/librustc_codegen_utils/lib.rs | 1 + 6 files changed, 56 insertions(+), 42 deletions(-) create mode 100644 src/librustc_codegen_utils/interfaces/mod.rs diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 2af78940685c9..c46a103015ff3 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -30,7 +30,7 @@ use rustc::hir; use interfaces::BuilderMethods; use mir::constant::const_alloc_to_llvm; use mir::place::PlaceRef; -use rustc_codegen_utils::common::{OperandBundleDef, TypeKind}; +use rustc_codegen_utils::common::TypeKind; use libc::{c_uint, c_char}; use std::iter; @@ -68,42 +68,6 @@ pub use context::CodegenCx; * */ -/// A structure representing an active landing pad for the duration of a basic -/// block. -/// -/// Each `Block` may contain an instance of this, indicating whether the block -/// is part of a landing pad or not. This is used to make decision about whether -/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to -/// use `invoke`) and also about various function call metadata. -/// -/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is -/// just a bunch of `None` instances (not too interesting), but for MSVC -/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. 
-/// When inside of a landing pad, each function call in LLVM IR needs to be -/// annotated with which landing pad it's a part of. This is accomplished via -/// the `OperandBundleDef` value created for MSVC landing pads. -pub struct Funclet<'ll, V> { - cleanuppad: V, - operand: OperandBundleDef<'ll, V>, -} - -impl<'ll, V : CodegenObject> Funclet<'ll, V> { - pub fn new(cleanuppad: V) -> Self { - Funclet { - cleanuppad, - operand: OperandBundleDef::new("funclet", cleanuppad), - } - } - - pub fn cleanuppad(&self) -> V { - self.cleanuppad - } - - pub fn bundle(&self) -> &OperandBundleDef<'ll, V> { - &self.operand - } -} - impl Backend<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { type Value = &'ll Value; type BasicBlock = &'ll BasicBlock; diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 5bede58df6fde..a2c2443ab8bd2 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -32,13 +32,10 @@ pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; pub use self::abi::{AbiMethods, AbiBuilderMethods}; pub use self::declare::{DeclareMethods, PreDefineMethods}; pub use self::asm::{AsmMethods, AsmBuilderMethods}; - -use std::fmt; +pub use rustc_codegen_utils::interfaces::CodegenObject; pub trait CodegenMethods<'ll, 'tcx: 'll> : Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + PreDefineMethods<'ll, 'tcx> {} - -pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index 879d27ff361a2..ce290c87c2146 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -16,7 +16,7 @@ use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use 
rustc::session::config::DebugInfo; use base; -use common::Funclet; +use rustc_codegen_utils::common::Funclet; use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{FnType, PassMode}; diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_utils/common.rs index f962d98a2a958..832513effd6fa 100644 --- a/src/librustc_codegen_utils/common.rs +++ b/src/librustc_codegen_utils/common.rs @@ -11,6 +11,7 @@ use rustc::ty::{self, Ty, TyCtxt}; use syntax_pos::DUMMY_SP; +use interfaces::CodegenObject; pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { @@ -39,6 +40,44 @@ impl<'a, V> OperandBundleDef<'a, V> { } } + + +/// A structure representing an active landing pad for the duration of a basic +/// block. +/// +/// Each `Block` may contain an instance of this, indicating whether the block +/// is part of a landing pad or not. This is used to make decision about whether +/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to +/// use `invoke`) and also about various function call metadata. +/// +/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is +/// just a bunch of `None` instances (not too interesting), but for MSVC +/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. +/// When inside of a landing pad, each function call in LLVM IR needs to be +/// annotated with which landing pad it's a part of. This is accomplished via +/// the `OperandBundleDef` value created for MSVC landing pads. 
+pub struct Funclet<'ll, V> { + cleanuppad: V, + operand: OperandBundleDef<'ll, V>, +} + +impl<'ll, V : CodegenObject> Funclet<'ll, V> { + pub fn new(cleanuppad: V) -> Self { + Funclet { + cleanuppad, + operand: OperandBundleDef::new("funclet", cleanuppad), + } + } + + pub fn cleanuppad(&self) -> V { + self.cleanuppad + } + + pub fn bundle(&self) -> &OperandBundleDef<'ll, V> { + &self.operand + } +} + pub enum IntPredicate { IntEQ, IntNE, diff --git a/src/librustc_codegen_utils/interfaces/mod.rs b/src/librustc_codegen_utils/interfaces/mod.rs new file mode 100644 index 0000000000000..34752040fce90 --- /dev/null +++ b/src/librustc_codegen_utils/interfaces/mod.rs @@ -0,0 +1,13 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt; + +pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index eee6723dead29..331e2dc6c9ce2 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -47,6 +47,7 @@ pub mod codegen_backend; pub mod symbol_names; pub mod symbol_names_test; pub mod common; +pub mod interfaces; /// check for the #[rustc_error] annotation, which forces an /// error in codegen. 
This is used to write compile-fail tests From c785979e7ca5940aceed4fd1c39ed77d24e24789 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 1 Oct 2018 10:32:09 +0200 Subject: [PATCH 64/76] Moved Backend interface into rustc_codegen_utils --- src/librustc_codegen_llvm/back/lto.rs | 3 +- src/librustc_codegen_llvm/back/write.rs | 4 +- src/librustc_codegen_llvm/base.rs | 26 +------ src/librustc_codegen_llvm/interfaces/abi.rs | 2 +- src/librustc_codegen_llvm/interfaces/asm.rs | 2 +- .../interfaces/builder.rs | 2 +- .../interfaces/debuginfo.rs | 2 +- .../interfaces/declare.rs | 2 +- .../interfaces/intrinsic.rs | 2 +- src/librustc_codegen_llvm/interfaces/misc.rs | 2 +- src/librustc_codegen_llvm/interfaces/mod.rs | 3 +- .../interfaces/statics.rs | 2 +- src/librustc_codegen_llvm/interfaces/type_.rs | 2 +- src/librustc_codegen_llvm/lib.rs | 70 +----------------- src/librustc_codegen_utils/common.rs | 22 ++++++ .../interfaces/backend.rs | 2 +- src/librustc_codegen_utils/interfaces/mod.rs | 3 + src/librustc_codegen_utils/lib.rs | 72 +++++++++++++++++++ 18 files changed, 114 insertions(+), 109 deletions(-) rename src/{librustc_codegen_llvm => librustc_codegen_utils}/interfaces/backend.rs (98%) diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index a8192731b9a71..fd6bf637ad6fa 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -24,7 +24,8 @@ use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; -use {ModuleCodegen, ModuleLlvm, ModuleKind}; +use ModuleLlvm; +use rustc_codegen_utils::{ModuleCodegen, ModuleKind}; use libc; diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 4fccc693aebbd..460d1bd96a48b 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -29,8 +29,8 @@ use rustc::util::nodemap::FxHashMap; 
use time_graph::{self, TimeGraph, Timeline}; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, ModuleLlvm, - CachedModuleCodegen}; +use {CodegenResults, ModuleLlvm}; +use rustc_codegen_utils::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule}; use CrateInfo; use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc::ty::TyCtxt; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 1a913b700aa0d..855a78ab50f83 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,9 +24,7 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. use super::ModuleLlvm; -use super::ModuleCodegen; -use super::ModuleKind; -use super::CachedModuleCodegen; +use rustc_codegen_utils::{ModuleCodegen, ModuleKind, CachedModuleCodegen}; use super::LlvmCodegenBackend; use abi; @@ -1314,25 +1312,3 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { Visibility::Protected => llvm::Visibility::Protected, } } - -// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement -// the HashStable trait. Normally DepGraph::with_task() calls are -// hidden behind queries, but CGU creation is a special case in two -// ways: (1) it's not a query and (2) CGU are output nodes, so their -// Fingerprints are not actually needed. It remains to be clarified -// how exactly this case will be handled in the red/green system but -// for now we content ourselves with providing a no-op HashStable -// implementation for CGUs. 
-mod temp_stable_hash_impls { - use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, - HashStable}; - use {ModuleCodegen, ModuleLlvm}; - - impl HashStable for ModuleCodegen { - fn hash_stable(&self, - _: &mut HCX, - _: &mut StableHasher) { - // do nothing - } - } -} diff --git a/src/librustc_codegen_llvm/interfaces/abi.rs b/src/librustc_codegen_llvm/interfaces/abi.rs index 48827455b7b4b..3f85b68447dc7 100644 --- a/src/librustc_codegen_llvm/interfaces/abi.rs +++ b/src/librustc_codegen_llvm/interfaces/abi.rs @@ -10,7 +10,7 @@ use abi::FnType; use rustc::ty::{FnSig, Ty, Instance}; -use super::backend::Backend; +use super::Backend; use super::builder::HasCodegen; pub trait AbiMethods<'tcx> { diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs index 6dfd43a7467d0..820ea3ce4ac35 100644 --- a/src/librustc_codegen_llvm/interfaces/asm.rs +++ b/src/librustc_codegen_llvm/interfaces/asm.rs @@ -10,7 +10,7 @@ use rustc::hir::{InlineAsm, GlobalAsm}; use mir::place::PlaceRef; -use super::backend::Backend; +use super::Backend; use super::builder::HasCodegen; pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index c9681f8dd6c84..51d03ef88c1f6 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -13,7 +13,7 @@ use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; use builder::MemFlags; -use super::backend::Backend; +use super::Backend; use super::CodegenMethods; use super::debuginfo::DebugInfoBuilderMethods; use super::intrinsic::IntrinsicCallMethods; diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs index 5b23a2f0baed9..4b9a100b2fbe2 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ 
b/src/librustc_codegen_llvm/interfaces/debuginfo.rs @@ -9,7 +9,7 @@ // except according to those terms. use rustc::ty::{Ty, FnSig}; -use super::backend::Backend; +use super::Backend; use super::builder::HasCodegen; use rustc::mir; use monomorphize::Instance; diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_llvm/interfaces/declare.rs index 8b48796593770..9a88a2fe3a208 100644 --- a/src/librustc_codegen_llvm/interfaces/declare.rs +++ b/src/librustc_codegen_llvm/interfaces/declare.rs @@ -9,7 +9,7 @@ // except according to those terms. use rustc::ty::Ty; -use super::backend::Backend; +use super::Backend; use rustc::hir::def_id::DefId; use rustc::mir::mono::{Linkage, Visibility}; use monomorphize::Instance; diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs index 1a8f4db900c81..f7491758c783a 100644 --- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_llvm/interfaces/intrinsic.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::backend::Backend; +use super::Backend; use super::builder::HasCodegen; use mir::operand::OperandRef; use rustc::ty::Ty; diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_llvm/interfaces/misc.rs index 6344d19464d97..70ab7ec1419e6 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_llvm/interfaces/misc.rs @@ -11,7 +11,7 @@ use std::cell::RefCell; use rustc::util::nodemap::FxHashMap; use rustc::ty::{Ty, self, Instance}; -use super::backend::Backend; +use super::Backend; use rustc::session::Session; use libc::c_uint; use rustc::mir::mono::Stats; diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index a2c2443ab8bd2..696316338ce20 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -9,7 +9,6 @@ // except according to those terms. mod builder; -mod backend; mod consts; mod type_; mod intrinsic; @@ -21,7 +20,7 @@ mod declare; mod asm; pub use self::builder::{BuilderMethods, HasCodegen}; -pub use self::backend::{Backend, BackendMethods}; +pub use rustc_codegen_utils::interfaces::{Backend, BackendMethods}; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods}; diff --git a/src/librustc_codegen_llvm/interfaces/statics.rs b/src/librustc_codegen_llvm/interfaces/statics.rs index 125f86951161b..1c77b891cc96e 100644 --- a/src/librustc_codegen_llvm/interfaces/statics.rs +++ b/src/librustc_codegen_llvm/interfaces/statics.rs @@ -10,7 +10,7 @@ use rustc::ty::layout::Align; use rustc::hir::def_id::DefId; -use super::backend::Backend; +use super::Backend; pub trait StaticMethods<'ll> : Backend<'ll> { fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index 517f1af634d96..ad35523faea79 
100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::backend::Backend; +use super::Backend; use super::builder::HasCodegen; use rustc_codegen_utils::common::TypeKind; use syntax::ast; diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 016cac4448392..78108718205f3 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -37,7 +37,6 @@ #![feature(static_nobundle)] use back::write::create_target_machine; -use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; #[macro_use] extern crate bitflags; @@ -67,7 +66,6 @@ extern crate cc; // Used to locate MSVC extern crate tempfile; extern crate memmap; -use back::bytecode::RLIB_BYTECODE_EXTENSION; use interfaces::*; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; @@ -77,7 +75,6 @@ use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; -use std::path::{PathBuf}; use std::sync::mpsc; use rustc_data_structures::sync::Lrc; @@ -94,6 +91,7 @@ use rustc::util::time_graph; use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; +use rustc_codegen_utils::{ModuleCodegen, CompiledModule}; use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc_data_structures::svh::Svh; @@ -336,72 +334,6 @@ pub fn __rustc_codegen_backend() -> Box { LlvmCodegenBackend::new() } -pub struct ModuleCodegen { - /// The name of the module. When the crate may be saved between - /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain - /// something unique to this crate (e.g., a module path) as well - /// as the crate name and disambiguator. 
- /// We currently generate these names via CodegenUnit::build_cgu_name(). - name: String, - module_llvm: M, - kind: ModuleKind, -} - -struct CachedModuleCodegen { - name: String, - source: WorkProduct, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum ModuleKind { - Regular, - Metadata, - Allocator, -} - -impl ModuleCodegen { - fn into_compiled_module(self, - emit_obj: bool, - emit_bc: bool, - emit_bc_compressed: bool, - outputs: &OutputFilenames) -> CompiledModule { - let object = if emit_obj { - Some(outputs.temp_path(OutputType::Object, Some(&self.name))) - } else { - None - }; - let bytecode = if emit_bc { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) - } else { - None - }; - let bytecode_compressed = if emit_bc_compressed { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) - .with_extension(RLIB_BYTECODE_EXTENSION)) - } else { - None - }; - - CompiledModule { - name: self.name.clone(), - kind: self.kind, - object, - bytecode, - bytecode_compressed, - } - } -} - -#[derive(Debug)] -struct CompiledModule { - name: String, - kind: ModuleKind, - object: Option, - bytecode: Option, - bytecode_compressed: Option, -} - pub struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_utils/common.rs index 832513effd6fa..acd5bb3b93603 100644 --- a/src/librustc_codegen_utils/common.rs +++ b/src/librustc_codegen_utils/common.rs @@ -166,3 +166,25 @@ pub enum TypeKind { X86_MMX, Token, } + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. Normally DepGraph::with_task() calls are +// hidden behind queries, but CGU creation is a special case in two +// ways: (1) it's not a query and (2) CGU are output nodes, so their +// Fingerprints are not actually needed. 
It remains to be clarified +// how exactly this case will be handled in the red/green system but +// for now we content ourselves with providing a no-op HashStable +// implementation for CGUs. +mod temp_stable_hash_impls { + use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, + HashStable}; + use ModuleCodegen; + + impl HashStable for ModuleCodegen { + fn hash_stable(&self, + _: &mut HCX, + _: &mut StableHasher) { + // do nothing + } + } +} diff --git a/src/librustc_codegen_llvm/interfaces/backend.rs b/src/librustc_codegen_utils/interfaces/backend.rs similarity index 98% rename from src/librustc_codegen_llvm/interfaces/backend.rs rename to src/librustc_codegen_utils/interfaces/backend.rs index bc79d946f56f4..45f1f273ef967 100644 --- a/src/librustc_codegen_llvm/interfaces/backend.rs +++ b/src/librustc_codegen_utils/interfaces/backend.rs @@ -16,7 +16,7 @@ use rustc::middle::allocator::AllocatorKind; use rustc::ty::TyCtxt; use rustc::mir::mono::Stats; use syntax_pos::symbol::InternedString; -use time_graph::TimeGraph; +use rustc::util::time_graph::TimeGraph; use std::sync::mpsc::Receiver; use std::any::Any; diff --git a/src/librustc_codegen_utils/interfaces/mod.rs b/src/librustc_codegen_utils/interfaces/mod.rs index 34752040fce90..2659f0427c8b2 100644 --- a/src/librustc_codegen_utils/interfaces/mod.rs +++ b/src/librustc_codegen_utils/interfaces/mod.rs @@ -9,5 +9,8 @@ // except according to those terms. 
use std::fmt; +mod backend; + +pub use self::backend::{Backend, BackendMethods}; pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index 331e2dc6c9ce2..0b2089ea6aaef 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -21,6 +21,7 @@ #![feature(custom_attribute)] #![feature(nll)] #![allow(unused_attributes)] +#![allow(dead_code)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] @@ -40,7 +41,10 @@ extern crate syntax_pos; #[macro_use] extern crate rustc_data_structures; extern crate rustc_metadata_utils; +use std::path::PathBuf; use rustc::ty::TyCtxt; +use rustc::dep_graph::WorkProduct; +use rustc::session::config::{OutputFilenames, OutputType}; pub mod link; pub mod codegen_backend; @@ -49,6 +53,74 @@ pub mod symbol_names_test; pub mod common; pub mod interfaces; +pub struct ModuleCodegen { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + /// We currently generate these names via CodegenUnit::build_cgu_name(). 
+ pub name: String, + pub module_llvm: M, + pub kind: ModuleKind, +} + +pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z"; + +impl ModuleCodegen { + pub fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + emit_bc_compressed: bool, + outputs: &OutputFilenames) -> CompiledModule { + let object = if emit_obj { + Some(outputs.temp_path(OutputType::Object, Some(&self.name))) + } else { + None + }; + let bytecode = if emit_bc { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) + } else { + None + }; + let bytecode_compressed = if emit_bc_compressed { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) + .with_extension(RLIB_BYTECODE_EXTENSION)) + } else { + None + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + object, + bytecode, + bytecode_compressed, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub object: Option, + pub bytecode: Option, + pub bytecode_compressed: Option, +} + +pub struct CachedModuleCodegen { + pub name: String, + pub source: WorkProduct, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + /// check for the #[rustc_error] annotation, which forces an /// error in codegen. 
This is used to write compile-fail tests /// that actually test that compilation succeeds without From 9a051421a8378c1740db58a12cb15e5840c677be Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 1 Oct 2018 11:19:00 +0200 Subject: [PATCH 65/76] Adapted to lastest rustc master changes --- src/librustc_codegen_llvm/declare.rs | 2 +- .../interfaces/builder.rs | 3 ++- src/librustc_codegen_llvm/interfaces/consts.rs | 2 +- src/librustc_codegen_llvm/llvm/ffi.rs | 18 ++++++++++++------ 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index ab87490b74f5c..7ef37affaed83 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -130,7 +130,7 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let fty = FnType::new(self, sig, &[]); let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); - if self.layout_of(sig.output()).abi.is_uninhabited() { + if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited { llvm::Attribute::NoReturn.apply_llfn(Function, llfn); } diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index 51d03ef88c1f6..b79b15c228665 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -8,7 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; +use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, + SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_llvm/interfaces/consts.rs index 9df3468c51cb4..837aa0a349d93 100644 --- a/src/librustc_codegen_llvm/interfaces/consts.rs +++ b/src/librustc_codegen_llvm/interfaces/consts.rs @@ -41,7 +41,7 @@ pub trait ConstMethods<'ll, 'tcx: 'll> : Backend<'ll> { // NB: Do not use `do_spill_noroot` to make this into a constant string, or // you will be kicked off fast isel. See issue #4352 for an example of this. fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; - + fn const_fat_ptr( &self, ptr: Self::Value, diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index bbc2c95e1f2f4..90be85abda47d 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -185,7 +185,8 @@ pub enum RealPredicate { impl RealPredicate { pub fn from_generic(realpred: rustc_codegen_utils::common::RealPredicate) -> Self { match realpred { - rustc_codegen_utils::common::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, + rustc_codegen_utils::common::RealPredicate::RealPredicateFalse => + RealPredicate::RealPredicateFalse, rustc_codegen_utils::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, rustc_codegen_utils::common::RealPredicate::RealOGT => RealPredicate::RealOGT, rustc_codegen_utils::common::RealPredicate::RealOGE => RealPredicate::RealOGE, @@ -200,7 +201,8 @@ impl RealPredicate { rustc_codegen_utils::common::RealPredicate::RealULT => RealPredicate::RealULT, rustc_codegen_utils::common::RealPredicate::RealULE => RealPredicate::RealULE, 
rustc_codegen_utils::common::RealPredicate::RealUNE => RealPredicate::RealUNE, - rustc_codegen_utils::common::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue + rustc_codegen_utils::common::RealPredicate::RealPredicateTrue => + RealPredicate::RealPredicateTrue } } } @@ -310,7 +312,8 @@ impl AtomicOrdering { rustc_codegen_utils::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, rustc_codegen_utils::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, rustc_codegen_utils::common::AtomicOrdering::Release => AtomicOrdering::Release, - rustc_codegen_utils::common::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, + rustc_codegen_utils::common::AtomicOrdering::AcquireRelease => + AtomicOrdering::AcquireRelease, rustc_codegen_utils::common::AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent } @@ -332,9 +335,12 @@ pub enum SynchronizationScope { impl SynchronizationScope { pub fn from_generic(sc : rustc_codegen_utils::common::SynchronizationScope) -> Self { match sc { - rustc_codegen_utils::common::SynchronizationScope::Other => SynchronizationScope::Other, - rustc_codegen_utils::common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, - rustc_codegen_utils::common::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, + rustc_codegen_utils::common::SynchronizationScope::Other => + SynchronizationScope::Other, + rustc_codegen_utils::common::SynchronizationScope::SingleThread => + SynchronizationScope::SingleThread, + rustc_codegen_utils::common::SynchronizationScope::CrossThread => + SynchronizationScope::CrossThread, } } } From 31dee750188e1166d68cf49e58ca22e649fbb56c Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 1 Oct 2018 18:07:04 +0200 Subject: [PATCH 66/76] Beginning of moving all backend-agnostic code to rustc_codegen_ssa --- src/Cargo.lock | 13 ++ src/librustc_codegen_llvm/back/lto.rs | 2 +- 
src/librustc_codegen_llvm/back/write.rs | 2 +- src/librustc_codegen_llvm/base.rs | 4 +- src/librustc_codegen_llvm/builder.rs | 24 +-- src/librustc_codegen_llvm/common.rs | 2 +- src/librustc_codegen_llvm/declare.rs | 2 +- src/librustc_codegen_llvm/glue.rs | 2 +- .../interfaces/builder.rs | 2 +- src/librustc_codegen_llvm/interfaces/mod.rs | 3 +- src/librustc_codegen_llvm/interfaces/type_.rs | 2 +- src/librustc_codegen_llvm/intrinsic.rs | 6 +- src/librustc_codegen_llvm/lib.rs | 3 +- src/librustc_codegen_llvm/llvm/ffi.rs | 142 +++++++++--------- src/librustc_codegen_llvm/llvm/mod.rs | 4 +- src/librustc_codegen_llvm/mir/block.rs | 34 +++-- src/librustc_codegen_llvm/mir/mod.rs | 2 +- src/librustc_codegen_llvm/mir/operand.rs | 5 +- src/librustc_codegen_llvm/mir/place.rs | 2 +- src/librustc_codegen_llvm/mir/rvalue.rs | 2 +- src/librustc_codegen_llvm/type_.rs | 10 +- src/librustc_codegen_ssa/Cargo.toml | 18 +++ .../common.rs | 0 .../interfaces/backend.rs | 0 .../interfaces/mod.rs | 0 src/librustc_codegen_ssa/lib.rs | 113 ++++++++++++++ src/librustc_codegen_utils/lib.rs | 73 --------- src/librustc_driver/Cargo.toml | 1 + src/librustc_driver/lib.rs | 1 + 29 files changed, 274 insertions(+), 200 deletions(-) create mode 100644 src/librustc_codegen_ssa/Cargo.toml rename src/{librustc_codegen_utils => librustc_codegen_ssa}/common.rs (100%) rename src/{librustc_codegen_utils => librustc_codegen_ssa}/interfaces/backend.rs (100%) rename src/{librustc_codegen_utils => librustc_codegen_ssa}/interfaces/mod.rs (100%) create mode 100644 src/librustc_codegen_ssa/lib.rs diff --git a/src/Cargo.lock b/src/Cargo.lock index 3361e81ecfe6d..a33c726f36053 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2128,6 +2128,18 @@ dependencies = [ "rustc_llvm 0.0.0", ] +[[package]] +name = "rustc_codegen_ssa" +version = "0.0.0" +dependencies = [ + "rustc 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_mir 0.0.0", + "rustc_target 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + [[package]] name 
= "rustc_codegen_utils" version = "0.0.0" @@ -2184,6 +2196,7 @@ dependencies = [ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", "rustc_borrowck 0.0.0", + "rustc_codegen_ssa 0.0.0", "rustc_codegen_utils 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index fd6bf637ad6fa..2328c03b37770 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -25,7 +25,7 @@ use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; use ModuleLlvm; -use rustc_codegen_utils::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 460d1bd96a48b..561950c5e5b46 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -30,7 +30,7 @@ use time_graph::{self, TimeGraph, Timeline}; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; use {CodegenResults, ModuleLlvm}; -use rustc_codegen_utils::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule}; use CrateInfo; use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc::ty::TyCtxt; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 855a78ab50f83..e437cb0ffb409 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,7 +24,7 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
use super::ModuleLlvm; -use rustc_codegen_utils::{ModuleCodegen, ModuleKind, CachedModuleCodegen}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen}; use super::LlvmCodegenBackend; use abi; @@ -53,7 +53,7 @@ use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; use rustc_mir::monomorphize::item::DefPathBasedNames; use common; -use rustc_codegen_utils::common::{RealPredicate, TypeKind, IntPredicate}; +use rustc_codegen_ssa::common::{RealPredicate, TypeKind, IntPredicate}; use meth; use mir; use context::CodegenCx; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 61009a58f2795..45ce932597fa8 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -10,8 +10,8 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; -use rustc_codegen_utils::common::{IntPredicate, TypeKind, RealPredicate}; -use rustc_codegen_utils; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; +use rustc_codegen_ssa; use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; @@ -198,7 +198,7 @@ impl BuilderMethods<'a, 'll, 'tcx> args: &[&'ll Value], then: &'ll BasicBlock, catch: &'ll BasicBlock, - bundle: Option<&rustc_codegen_utils::common::OperandBundleDef<'ll, &'ll Value>> + bundle: Option<&rustc_codegen_ssa::common::OperandBundleDef<'ll, &'ll Value>> ) -> &'ll Value { self.count_insn("invoke"); @@ -516,7 +516,7 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_load( &self, ptr: &'ll Value, - order: rustc_codegen_utils::common::AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, align: Align ) -> &'ll Value { self.count_insn("load.atomic"); @@ -638,7 +638,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: rustc_codegen_utils::common::AtomicOrdering, align: Align) { + order: 
rustc_codegen_ssa::common::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); @@ -1170,8 +1170,8 @@ impl BuilderMethods<'a, 'll, 'tcx> dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: rustc_codegen_utils::common::AtomicOrdering, - failure_order: rustc_codegen_utils::common::AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, weak: bool, ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; @@ -1189,10 +1189,10 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_rmw( &self, - op: rustc_codegen_utils::common::AtomicRmwBinOp, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: rustc_codegen_utils::common::AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { unsafe { llvm::LLVMBuildAtomicRMW( @@ -1207,8 +1207,8 @@ impl BuilderMethods<'a, 'll, 'tcx> fn atomic_fence( &self, - order: rustc_codegen_utils::common::AtomicOrdering, - scope: rustc_codegen_utils::common::SynchronizationScope + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope ) { unsafe { llvm::LLVMRustBuildAtomicFence( @@ -1328,7 +1328,7 @@ impl BuilderMethods<'a, 'll, 'tcx> &self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&rustc_codegen_utils::common::OperandBundleDef<'ll, &'ll Value>> + bundle: Option<&rustc_codegen_ssa::common::OperandBundleDef<'ll, &'ll Value>> ) -> &'ll Value { self.count_insn("call"); diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index c46a103015ff3..a86e7234b6c20 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -30,7 +30,7 @@ use rustc::hir; use interfaces::BuilderMethods; use mir::constant::const_alloc_to_llvm; use mir::place::PlaceRef; -use 
rustc_codegen_utils::common::TypeKind; +use rustc_codegen_ssa::common::TypeKind; use libc::{c_uint, c_char}; use std::iter; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 7ef37affaed83..ab87490b74f5c 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -130,7 +130,7 @@ impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let fty = FnType::new(self, sig, &[]); let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); - if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited { + if self.layout_of(sig.output()).abi.is_uninhabited() { llvm::Attribute::NoReturn.apply_llfn(Function, llfn); } diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs index f0868962af074..cf82316c559c7 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -14,7 +14,7 @@ use std; -use rustc_codegen_utils::common::IntPredicate; +use rustc_codegen_ssa::common::IntPredicate; use meth; use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs index b79b15c228665..bfc22f8e064e2 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_llvm/interfaces/builder.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_codegen_utils::common::{IntPredicate, RealPredicate, AtomicOrdering, +use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; use libc::c_char; use rustc::ty::TyCtxt; diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 696316338ce20..33cec84ef53f0 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -20,7 +20,7 @@ mod declare; mod asm; pub use self::builder::{BuilderMethods, HasCodegen}; -pub use rustc_codegen_utils::interfaces::{Backend, BackendMethods}; +pub use rustc_codegen_ssa::interfaces::{Backend, BackendMethods, CodegenObject}; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods}; @@ -31,7 +31,6 @@ pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; pub use self::abi::{AbiMethods, AbiBuilderMethods}; pub use self::declare::{DeclareMethods, PreDefineMethods}; pub use self::asm::{AsmMethods, AsmBuilderMethods}; -pub use rustc_codegen_utils::interfaces::CodegenObject; pub trait CodegenMethods<'ll, 'tcx: 'll> : Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs index ad35523faea79..d31237c6f0999 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_llvm/interfaces/type_.rs @@ -10,7 +10,7 @@ use super::Backend; use super::builder::HasCodegen; -use rustc_codegen_utils::common::TypeKind; +use rustc_codegen_ssa::common::TypeKind; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use std::cell::RefCell; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index c53d277e39633..7d8b3c8d61741 100644 --- 
a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -23,7 +23,7 @@ use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; -use rustc_codegen_utils::common::TypeKind; +use rustc_codegen_ssa::common::TypeKind; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -424,8 +424,8 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst name if name.starts_with("atomic_") => { - use rustc_codegen_utils::common::AtomicOrdering::*; - use rustc_codegen_utils::common:: + use rustc_codegen_ssa::common::AtomicOrdering::*; + use rustc_codegen_ssa::common:: {SynchronizationScope, AtomicRmwBinOp}; let split: Vec<&str> = name.split('_').collect(); diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 78108718205f3..1afc37e40f49f 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -55,6 +55,7 @@ extern crate rustc_incremental; extern crate rustc_llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_codegen_utils; +extern crate rustc_codegen_ssa; extern crate rustc_fs_util; #[macro_use] extern crate log; @@ -91,7 +92,7 @@ use rustc::util::time_graph; use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; -use rustc_codegen_utils::{ModuleCodegen, CompiledModule}; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc_data_structures::svh::Svh; diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 90be85abda47d..fe1197c2fe8cc 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,8 +19,8 @@ use libc::{c_uint, 
c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; -use rustc_codegen_utils; use syntax; +use rustc_codegen_ssa; use super::RustString; @@ -144,18 +144,18 @@ pub enum IntPredicate { } impl IntPredicate { - pub fn from_generic(intpre: rustc_codegen_utils::common::IntPredicate) -> Self { + pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { match intpre { - rustc_codegen_utils::common::IntPredicate::IntEQ => IntPredicate::IntEQ, - rustc_codegen_utils::common::IntPredicate::IntNE => IntPredicate::IntNE, - rustc_codegen_utils::common::IntPredicate::IntUGT => IntPredicate::IntUGT, - rustc_codegen_utils::common::IntPredicate::IntUGE => IntPredicate::IntUGE, - rustc_codegen_utils::common::IntPredicate::IntULT => IntPredicate::IntULT, - rustc_codegen_utils::common::IntPredicate::IntULE => IntPredicate::IntULE, - rustc_codegen_utils::common::IntPredicate::IntSGT => IntPredicate::IntSGT, - rustc_codegen_utils::common::IntPredicate::IntSGE => IntPredicate::IntSGE, - rustc_codegen_utils::common::IntPredicate::IntSLT => IntPredicate::IntSLT, - rustc_codegen_utils::common::IntPredicate::IntSLE => IntPredicate::IntSLE, + rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, + rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, + rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, + rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, + rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, + rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, + rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, + rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, + rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, + rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, } } } @@ -183,25 +183,25 @@ pub enum RealPredicate { } impl 
RealPredicate { - pub fn from_generic(realpred: rustc_codegen_utils::common::RealPredicate) -> Self { + pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self { match realpred { - rustc_codegen_utils::common::RealPredicate::RealPredicateFalse => + rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => RealPredicate::RealPredicateFalse, - rustc_codegen_utils::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, - rustc_codegen_utils::common::RealPredicate::RealOGT => RealPredicate::RealOGT, - rustc_codegen_utils::common::RealPredicate::RealOGE => RealPredicate::RealOGE, - rustc_codegen_utils::common::RealPredicate::RealOLT => RealPredicate::RealOLT, - rustc_codegen_utils::common::RealPredicate::RealOLE => RealPredicate::RealOLE, - rustc_codegen_utils::common::RealPredicate::RealONE => RealPredicate::RealONE, - rustc_codegen_utils::common::RealPredicate::RealORD => RealPredicate::RealORD, - rustc_codegen_utils::common::RealPredicate::RealUNO => RealPredicate::RealUNO, - rustc_codegen_utils::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, - rustc_codegen_utils::common::RealPredicate::RealUGT => RealPredicate::RealUGT, - rustc_codegen_utils::common::RealPredicate::RealUGE => RealPredicate::RealUGE, - rustc_codegen_utils::common::RealPredicate::RealULT => RealPredicate::RealULT, - rustc_codegen_utils::common::RealPredicate::RealULE => RealPredicate::RealULE, - rustc_codegen_utils::common::RealPredicate::RealUNE => RealPredicate::RealUNE, - rustc_codegen_utils::common::RealPredicate::RealPredicateTrue => + rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT, + rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE, + rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT, + rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE, + 
rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE, + rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD, + rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO, + rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT, + rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE, + rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT, + rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE, + rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE, + rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => RealPredicate::RealPredicateTrue } } @@ -231,25 +231,25 @@ pub enum TypeKind { } impl TypeKind { - pub fn to_generic(self) -> rustc_codegen_utils::common::TypeKind { + pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { match self { - TypeKind::Void => rustc_codegen_utils::common::TypeKind::Void, - TypeKind::Half => rustc_codegen_utils::common::TypeKind::Half, - TypeKind::Float => rustc_codegen_utils::common::TypeKind::Float, - TypeKind::Double => rustc_codegen_utils::common::TypeKind::Double, - TypeKind::X86_FP80 => rustc_codegen_utils::common::TypeKind::X86_FP80, - TypeKind::FP128 => rustc_codegen_utils::common::TypeKind::FP128, - TypeKind::PPC_FP128 => rustc_codegen_utils::common::TypeKind::PPC_FP128, - TypeKind::Label => rustc_codegen_utils::common::TypeKind::Label, - TypeKind::Integer => rustc_codegen_utils::common::TypeKind::Integer, - TypeKind::Function => rustc_codegen_utils::common::TypeKind::Function, - TypeKind::Struct => rustc_codegen_utils::common::TypeKind::Struct, - TypeKind::Array => rustc_codegen_utils::common::TypeKind::Array, - TypeKind::Pointer => rustc_codegen_utils::common::TypeKind::Pointer, - TypeKind::Vector => rustc_codegen_utils::common::TypeKind::Vector, - 
TypeKind::Metadata => rustc_codegen_utils::common::TypeKind::Metadata, - TypeKind::X86_MMX => rustc_codegen_utils::common::TypeKind::X86_MMX, - TypeKind::Token => rustc_codegen_utils::common::TypeKind::Token, + TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, + TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, + TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, + TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, + TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80, + TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128, + TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128, + TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, + TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, + TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, + TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, + TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, + TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, + TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, + TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, + TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX, + TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, } } } @@ -272,19 +272,19 @@ pub enum AtomicRmwBinOp { } impl AtomicRmwBinOp { - pub fn from_generic(op : rustc_codegen_utils::common::AtomicRmwBinOp) -> Self { + pub fn from_generic(op : rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self { match op { - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, - 
rustc_codegen_utils::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, - rustc_codegen_utils::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin } } } @@ -305,16 +305,16 @@ pub enum AtomicOrdering { } impl AtomicOrdering { - pub fn from_generic(ao : rustc_codegen_utils::common::AtomicOrdering) -> Self { + pub fn from_generic(ao : rustc_codegen_ssa::common::AtomicOrdering) -> Self { match ao { - rustc_codegen_utils::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, - rustc_codegen_utils::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, - 
rustc_codegen_utils::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, - rustc_codegen_utils::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, - rustc_codegen_utils::common::AtomicOrdering::Release => AtomicOrdering::Release, - rustc_codegen_utils::common::AtomicOrdering::AcquireRelease => + rustc_codegen_ssa::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + rustc_codegen_ssa::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release, + rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease, - rustc_codegen_utils::common::AtomicOrdering::SequentiallyConsistent => + rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent } } @@ -333,13 +333,13 @@ pub enum SynchronizationScope { } impl SynchronizationScope { - pub fn from_generic(sc : rustc_codegen_utils::common::SynchronizationScope) -> Self { + pub fn from_generic(sc : rustc_codegen_ssa::common::SynchronizationScope) -> Self { match sc { - rustc_codegen_utils::common::SynchronizationScope::Other => + rustc_codegen_ssa::common::SynchronizationScope::Other => SynchronizationScope::Other, - rustc_codegen_utils::common::SynchronizationScope::SingleThread => + rustc_codegen_ssa::common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread, - rustc_codegen_utils::common::SynchronizationScope::CrossThread => + rustc_codegen_ssa::common::SynchronizationScope::CrossThread => SynchronizationScope::CrossThread, } } diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index b9f0fca05f90a..a4b75340203af 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ 
b/src/librustc_codegen_llvm/llvm/mod.rs @@ -28,7 +28,7 @@ use std::ffi::CStr; use std::cell::RefCell; use libc::{self, c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_codegen_utils; +use rustc_codegen_ssa; pub mod archive_ro; pub mod diagnostic; @@ -274,7 +274,7 @@ impl OperandBundleDef<'a> { } pub fn from_generic( - bundle : &rustc_codegen_utils::common::OperandBundleDef<'a, &'a Value> + bundle : &rustc_codegen_ssa::common::OperandBundleDef<'a, &'a Value> ) -> Self { Self::new(bundle.name, &[bundle.val]) } diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs index f95806ea2b878..cc50243d398c0 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -18,7 +18,7 @@ use rustc_target::abi::call::ArgType; use base; use builder::MemFlags; use common; -use rustc_codegen_utils::common::IntPredicate; +use rustc_codegen_ssa::common::IntPredicate; use meth; use monomorphize; @@ -507,11 +507,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if (intrinsic == Some("init") || intrinsic == Some("uninit")) && fn_ty.ret.layout.abi.is_uninhabited() { - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -522,26 +522,28 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if intrinsic == Some("init") { "zeroed" } else { "uninitialized" } ); let msg_str = 
Symbol::intern(&str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( + &[msg_str, filename, line, col], + false + ); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); // Obtain the panic entry point. let def_id = common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. do_call( self, - bx, + &bx, fn_ty, llfn, &[msg_file_line_col], diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs index ce290c87c2146..ac7790b177c26 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -16,7 +16,7 @@ use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; use base; -use rustc_codegen_utils::common::Funclet; +use rustc_codegen_ssa::common::Funclet; use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{FnType, PassMode}; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 11dc6cf61e7dd..80599fb2e0566 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -10,7 +10,7 @@ use rustc::mir::interpret::{ConstValue, ConstEvalErr}; use rustc::mir; -use rustc::ty; +use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasTyCtxt}; use 
rustc_data_structures::sync::Lrc; @@ -115,8 +115,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { bx.cx().scalar_pair_element_backend_type(&layout, 0, true), ); let b_layout = bx.cx().scalar_pair_element_backend_type(&layout, 1, true); - let b_llval = scalar_to_llvm( - bx.cx(), + let b_llval = bx.cx().scalar_to_backend( b, b_scalar, b_layout, diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index 39eb45b7d8ee3..e4b17300b4741 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -12,7 +12,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use rustc_codegen_utils::common::IntPredicate; +use rustc_codegen_ssa::common::IntPredicate; use type_of::LayoutLlvmExt; use glue; diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs index dd64586d564cb..95e76fe9cc727 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -19,7 +19,7 @@ use std::{u128, i128}; use base; use callee; use common; -use rustc_codegen_utils::common::{RealPredicate, IntPredicate}; +use rustc_codegen_ssa::common::{RealPredicate, IntPredicate}; use monomorphize; use type_of::LayoutLlvmExt; diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 19e2414dc308c..69b5ea188f6d7 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -27,8 +27,8 @@ use rustc::ty::layout::TyLayout; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; use common; -use rustc_codegen_utils; -use rustc_codegen_utils::common::TypeKind; +use rustc_codegen_ssa; +use rustc_codegen_ssa::common::TypeKind; use type_of::LayoutLlvmExt; use abi::{LlvmType, FnTypeExt}; @@ -374,15 +374,15 @@ impl DerivedTypeMethods<'ll, 
'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_utils::common::type_needs_drop(*self.tcx(), ty) + rustc_codegen_ssa::common::type_needs_drop(*self.tcx(), ty) } fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_utils::common::type_is_sized(*self.tcx(), ty) + rustc_codegen_ssa::common::type_is_sized(*self.tcx(), ty) } fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_utils::common::type_is_freeze(*self.tcx(), ty) + rustc_codegen_ssa::common::type_is_freeze(*self.tcx(), ty) } fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml new file mode 100644 index 0000000000000..8fdf35f96dc87 --- /dev/null +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_ssa" +version = "0.0.0" + +[lib] +name = "rustc_codegen_ssa" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc = { path = "../librustc" } +rustc_target = { path = "../librustc_target" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_mir = { path = "../librustc_mir" } diff --git a/src/librustc_codegen_utils/common.rs b/src/librustc_codegen_ssa/common.rs similarity index 100% rename from src/librustc_codegen_utils/common.rs rename to src/librustc_codegen_ssa/common.rs diff --git a/src/librustc_codegen_utils/interfaces/backend.rs b/src/librustc_codegen_ssa/interfaces/backend.rs similarity index 100% rename from src/librustc_codegen_utils/interfaces/backend.rs rename to src/librustc_codegen_ssa/interfaces/backend.rs diff --git a/src/librustc_codegen_utils/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs similarity index 100% rename from src/librustc_codegen_utils/interfaces/mod.rs rename to 
src/librustc_codegen_ssa/interfaces/mod.rs diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs new file mode 100644 index 0000000000000..e8e6222f87c12 --- /dev/null +++ b/src/librustc_codegen_ssa/lib.rs @@ -0,0 +1,113 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Note +//! +//! This API is completely unstable and subject to change. + +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(custom_attribute)] +#![feature(nll)] +#![allow(unused_attributes)] +#![allow(dead_code)] +#![feature(quote)] +#![feature(rustc_diagnostic_macros)] + +#![recursion_limit="256"] + +extern crate rustc; +extern crate rustc_target; +extern crate rustc_mir; +extern crate syntax; +extern crate syntax_pos; +extern crate rustc_data_structures; + +use std::path::PathBuf; +use rustc::dep_graph::WorkProduct; +use rustc::session::config::{OutputFilenames, OutputType}; + +pub mod common; +pub mod interfaces; + +pub struct ModuleCodegen { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + /// We currently generate these names via CodegenUnit::build_cgu_name(). 
+ pub name: String, + pub module_llvm: M, + pub kind: ModuleKind, +} + +pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z"; + +impl ModuleCodegen { + pub fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + emit_bc_compressed: bool, + outputs: &OutputFilenames) -> CompiledModule { + let object = if emit_obj { + Some(outputs.temp_path(OutputType::Object, Some(&self.name))) + } else { + None + }; + let bytecode = if emit_bc { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) + } else { + None + }; + let bytecode_compressed = if emit_bc_compressed { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) + .with_extension(RLIB_BYTECODE_EXTENSION)) + } else { + None + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + object, + bytecode, + bytecode_compressed, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub object: Option, + pub bytecode: Option, + pub bytecode_compressed: Option, +} + +pub struct CachedModuleCodegen { + pub name: String, + pub source: WorkProduct, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + + +__build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index 0b2089ea6aaef..2d1f12b2d278d 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -41,85 +41,12 @@ extern crate syntax_pos; #[macro_use] extern crate rustc_data_structures; extern crate rustc_metadata_utils; -use std::path::PathBuf; use rustc::ty::TyCtxt; -use rustc::dep_graph::WorkProduct; -use rustc::session::config::{OutputFilenames, OutputType}; pub mod link; pub mod codegen_backend; pub mod symbol_names; pub mod symbol_names_test; -pub mod common; -pub mod interfaces; - -pub struct ModuleCodegen { - /// The name of the module. 
When the crate may be saved between - /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain - /// something unique to this crate (e.g., a module path) as well - /// as the crate name and disambiguator. - /// We currently generate these names via CodegenUnit::build_cgu_name(). - pub name: String, - pub module_llvm: M, - pub kind: ModuleKind, -} - -pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z"; - -impl ModuleCodegen { - pub fn into_compiled_module(self, - emit_obj: bool, - emit_bc: bool, - emit_bc_compressed: bool, - outputs: &OutputFilenames) -> CompiledModule { - let object = if emit_obj { - Some(outputs.temp_path(OutputType::Object, Some(&self.name))) - } else { - None - }; - let bytecode = if emit_bc { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) - } else { - None - }; - let bytecode_compressed = if emit_bc_compressed { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) - .with_extension(RLIB_BYTECODE_EXTENSION)) - } else { - None - }; - - CompiledModule { - name: self.name.clone(), - kind: self.kind, - object, - bytecode, - bytecode_compressed, - } - } -} - -#[derive(Debug)] -pub struct CompiledModule { - pub name: String, - pub kind: ModuleKind, - pub object: Option, - pub bytecode: Option, - pub bytecode_compressed: Option, -} - -pub struct CachedModuleCodegen { - pub name: String, - pub source: WorkProduct, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -pub enum ModuleKind { - Regular, - Metadata, - Allocator, -} /// check for the #[rustc_error] annotation, which forces an /// error in codegen. 
This is used to write compile-fail tests diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 470c8b03d0bca..2587a707510f6 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -32,6 +32,7 @@ rustc_resolve = { path = "../librustc_resolve" } rustc_save_analysis = { path = "../librustc_save_analysis" } rustc_traits = { path = "../librustc_traits" } rustc_codegen_utils = { path = "../librustc_codegen_utils" } +rustc_codegen_ssa = { path = "../librustc_codegen_ssa" } rustc_typeck = { path = "../librustc_typeck" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 276b7290c2ef0..58539846abff8 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -54,6 +54,7 @@ extern crate rustc_resolve; extern crate rustc_save_analysis; extern crate rustc_traits; extern crate rustc_codegen_utils; +extern crate rustc_codegen_ssa; extern crate rustc_typeck; extern crate scoped_tls; extern crate serialize; From 26e18a1907be6d6950f17e2cfe75697007927db8 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 2 Oct 2018 10:49:54 +0200 Subject: [PATCH 67/76] Moved DeclareMethods, MiscMethods and StaticMethods --- src/librustc_codegen_llvm/interfaces/mod.rs | 9 ++------- .../interfaces/declare.rs | 2 +- .../interfaces/misc.rs | 2 +- src/librustc_codegen_ssa/interfaces/mod.rs | 6 ++++++ .../interfaces/statics.rs | 0 src/librustc_codegen_ssa/lib.rs | 2 ++ 6 files changed, 12 insertions(+), 9 deletions(-) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/declare.rs (98%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/misc.rs (96%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/statics.rs (100%) diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 33cec84ef53f0..9de2c79f3bcf7 100644 --- 
a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -12,24 +12,19 @@ mod builder; mod consts; mod type_; mod intrinsic; -mod statics; -mod misc; mod debuginfo; mod abi; -mod declare; mod asm; pub use self::builder::{BuilderMethods, HasCodegen}; -pub use rustc_codegen_ssa::interfaces::{Backend, BackendMethods, CodegenObject}; +pub use rustc_codegen_ssa::interfaces::{Backend, BackendMethods, CodegenObject, MiscMethods, + StaticMethods, DeclareMethods, PreDefineMethods}; pub use self::consts::ConstMethods; pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods}; pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; -pub use self::statics::StaticMethods; -pub use self::misc::MiscMethods; pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; pub use self::abi::{AbiMethods, AbiBuilderMethods}; -pub use self::declare::{DeclareMethods, PreDefineMethods}; pub use self::asm::{AsmMethods, AsmBuilderMethods}; pub trait CodegenMethods<'ll, 'tcx: 'll> : diff --git a/src/librustc_codegen_llvm/interfaces/declare.rs b/src/librustc_codegen_ssa/interfaces/declare.rs similarity index 98% rename from src/librustc_codegen_llvm/interfaces/declare.rs rename to src/librustc_codegen_ssa/interfaces/declare.rs index 9a88a2fe3a208..64b1aff648464 100644 --- a/src/librustc_codegen_llvm/interfaces/declare.rs +++ b/src/librustc_codegen_ssa/interfaces/declare.rs @@ -12,7 +12,7 @@ use rustc::ty::Ty; use super::Backend; use rustc::hir::def_id::DefId; use rustc::mir::mono::{Linkage, Visibility}; -use monomorphize::Instance; +use rustc_mir::monomorphize::Instance; pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { diff --git a/src/librustc_codegen_llvm/interfaces/misc.rs b/src/librustc_codegen_ssa/interfaces/misc.rs similarity index 96% rename from src/librustc_codegen_llvm/interfaces/misc.rs rename to src/librustc_codegen_ssa/interfaces/misc.rs index 
70ab7ec1419e6..e01fb60a3c501 100644 --- a/src/librustc_codegen_llvm/interfaces/misc.rs +++ b/src/librustc_codegen_ssa/interfaces/misc.rs @@ -16,7 +16,7 @@ use rustc::session::Session; use libc::c_uint; use rustc::mir::mono::Stats; use std::sync::Arc; -use monomorphize::partitioning::CodegenUnit; +use rustc_mir::monomorphize::partitioning::CodegenUnit; pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn vtables(&self) -> &RefCell, diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs index 2659f0427c8b2..155730f3fc9cc 100644 --- a/src/librustc_codegen_ssa/interfaces/mod.rs +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -10,7 +10,13 @@ use std::fmt; mod backend; +mod misc; +mod statics; +mod declare; pub use self::backend::{Backend, BackendMethods}; +pub use self::misc::MiscMethods; +pub use self::statics::StaticMethods; +pub use self::declare::{DeclareMethods, PreDefineMethods}; pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_llvm/interfaces/statics.rs b/src/librustc_codegen_ssa/interfaces/statics.rs similarity index 100% rename from src/librustc_codegen_llvm/interfaces/statics.rs rename to src/librustc_codegen_ssa/interfaces/statics.rs diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index e8e6222f87c12..7c062781fc0ad 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -19,6 +19,7 @@ #![feature(box_patterns)] #![feature(box_syntax)] #![feature(custom_attribute)] +#![feature(libc)] #![feature(nll)] #![allow(unused_attributes)] #![allow(dead_code)] @@ -33,6 +34,7 @@ extern crate rustc_mir; extern crate syntax; extern crate syntax_pos; extern crate rustc_data_structures; +extern crate libc; use std::path::PathBuf; use rustc::dep_graph::WorkProduct; From 431601cc0fe219d8727300eff37d1a5fe55ac9d1 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 3 Oct 2018 13:49:57 +0200 Subject: [PATCH 68/76] Greate 
separation of librsutc_codegen_llvm : librustc_codegen_ssa compiles --- src/librustc_codegen_llvm/base.rs | 1037 +--------------- src/librustc_codegen_llvm/builder.rs | 8 - src/librustc_codegen_llvm/callee.rs | 15 - src/librustc_codegen_llvm/common.rs | 134 -- src/librustc_codegen_llvm/consts.rs | 55 + src/librustc_codegen_llvm/context.rs | 3 + .../debuginfo/create_scope_map.rs | 15 - src/librustc_codegen_llvm/debuginfo/mod.rs | 56 +- .../debuginfo/source_loc.rs | 12 - src/librustc_codegen_llvm/interfaces/mod.rs | 27 +- src/librustc_codegen_llvm/lib.rs | 27 +- src/librustc_codegen_llvm/mono-item.rs | 99 ++ src/librustc_codegen_llvm/type_.rs | 6 + src/librustc_codegen_ssa/Cargo.toml | 6 + src/librustc_codegen_ssa/base.rs | 1103 +++++++++++++++++ src/librustc_codegen_ssa/callee.rs | 29 + src/librustc_codegen_ssa/common.rs | 154 ++- src/librustc_codegen_ssa/debuginfo.rs | 92 ++ src/librustc_codegen_ssa/diagnostics.rs | 37 + .../glue.rs | 2 +- .../interfaces/abi.rs | 2 +- .../interfaces/asm.rs | 0 .../interfaces/backend.rs | 4 +- .../interfaces/builder.rs | 4 +- .../interfaces/consts.rs | 0 .../interfaces/debuginfo.rs | 11 +- .../interfaces/intrinsic.rs | 2 +- src/librustc_codegen_ssa/interfaces/misc.rs | 1 + src/librustc_codegen_ssa/interfaces/mod.rs | 22 + .../interfaces/type_.rs | 4 +- src/librustc_codegen_ssa/lib.rs | 57 +- .../meth.rs | 4 +- .../mir/analyze.rs | 7 +- .../mir/block.rs | 11 +- .../mir/constant.rs | 9 +- .../mir/mod.rs | 22 +- .../mir/operand.rs | 59 +- .../mir/place.rs | 5 +- .../mir/rvalue.rs | 16 +- .../mir/statement.rs | 0 .../mono_item.rs | 78 +- src/librustc_codegen_ssa/type_of.rs | 0 42 files changed, 1760 insertions(+), 1475 deletions(-) create mode 100644 src/librustc_codegen_llvm/mono-item.rs create mode 100644 src/librustc_codegen_ssa/base.rs create mode 100644 src/librustc_codegen_ssa/callee.rs create mode 100644 src/librustc_codegen_ssa/debuginfo.rs create mode 100644 src/librustc_codegen_ssa/diagnostics.rs rename 
src/{librustc_codegen_llvm => librustc_codegen_ssa}/glue.rs (99%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/abi.rs (96%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/asm.rs (100%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/builder.rs (99%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/consts.rs (100%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/debuginfo.rs (84%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/intrinsic.rs (97%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/interfaces/type_.rs (96%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/meth.rs (98%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/analyze.rs (98%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/block.rs (99%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/constant.rs (95%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/mod.rs (97%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/operand.rs (95%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/place.rs (99%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/rvalue.rs (99%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mir/statement.rs (100%) rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/mono_item.rs (58%) create mode 100644 src/librustc_codegen_ssa/type_of.rs diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index e437cb0ffb409..a19fbbc506d7d 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -88,506 +88,6 @@ use mir::operand::OperandValue; use rustc_codegen_utils::check_for_rustc_errors_attr; use std::marker::PhantomData; -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { - cx: &'a Cx, - name: Option, - istart: usize, - phantom: 
PhantomData<(&'ll (), &'tcx ())> -} - -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> { - pub fn new(cx: &'a Cx, name: String) -> Self { - let istart = cx.stats().borrow().n_llvm_insns; - StatRecorder { - cx, - name: Some(name), - istart, - phantom: PhantomData - } - } -} - -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Drop for - StatRecorder<'a, 'll, 'tcx, Cx> -{ - fn drop(&mut self) { - if self.cx.sess().codegen_stats() { - let mut stats = self.cx.stats().borrow_mut(); - let iend = stats.n_llvm_insns; - stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); - stats.n_fns += 1; - // Reset LLVM insn count to avoid compound costs. - stats.n_llvm_insns = self.istart; - } - } -} - -pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, - signed: bool) - -> IntPredicate { - match op { - hir::BinOpKind::Eq => IntPredicate::IntEQ, - hir::BinOpKind::Ne => IntPredicate::IntNE, - hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, - hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, - hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, - hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, - op => { - bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ - found {:?}", - op) - } - } -} - -pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { - match op { - hir::BinOpKind::Eq => RealPredicate::RealOEQ, - hir::BinOpKind::Ne => RealPredicate::RealUNE, - hir::BinOpKind::Lt => RealPredicate::RealOLT, - hir::BinOpKind::Le => RealPredicate::RealOLE, - hir::BinOpKind::Gt => RealPredicate::RealOGT, - hir::BinOpKind::Ge => RealPredicate::RealOGE, - op => { - bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ - found {:?}", - op); - } - } -} - -pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : 
BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - lhs: >::Value, - rhs: >::Value, - t: Ty<'tcx>, - ret_ty: >::Type, - op: hir::BinOpKind -) -> >::Value { - let signed = match t.sty { - ty::Float(_) => { - let cmp = bin_op_to_fcmp_predicate(op); - return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); - }, - ty::Uint(_) => false, - ty::Int(_) => true, - _ => bug!("compare_simd_types: invalid SIMD type"), - }; - - let cmp = bin_op_to_icmp_predicate(op, signed); - // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension - // to get the correctly sized type. This will compile to a single instruction - // once the IR is converted to assembly if the SIMD instruction is supported - // by the target architecture. - bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) -} - -/// Retrieve the information we are losing (making dynamic) in an unsizing -/// adjustment. -/// -/// The `old_info` argument is a bit funny. It is intended for use -/// in an upcast, where the new vtable for an object will be derived -/// from the old one. -pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( - cx: &'a Cx, - source: Ty<'tcx>, - target: Ty<'tcx>, - old_info: Option, -) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { - let (source, target) = cx.tcx().struct_lockstep_tails(source, target); - match (&source.sty, &target.sty) { - (&ty::Array(_, len), &ty::Slice(_)) => { - cx.const_usize(len.unwrap_usize(*cx.tcx())) - } - (&ty::Dynamic(..), &ty::Dynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. 
- old_info.expect("unsized_info: missing old info for trait upcast") - } - (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) - .field(cx, abi::FAT_PTR_EXTRA); - cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), - cx.backend_type(&vtable_ptr)) - } - _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", - source, - target), - } -} - -/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - src: >::Value, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx> -) -> (>::Value, >::Value) where - &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> -{ - debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(_, a, _), - &ty::Ref(_, b, _)) | - (&ty::Ref(_, a, _), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. 
})) => { - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx().type_is_sized(a)); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - let src_layout = bx.cx().layout_of(src_ty); - let dst_layout = bx.cx().layout_of(dst_ty); - let mut result = None; - for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx(), i); - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(dst_layout.fields.offset(i).bytes(), 0); - if src_f.is_zst() { - continue; - } - assert_eq!(src_layout.size, src_f.size); - - let dst_f = dst_layout.field(bx.cx(), i); - assert_ne!(src_f.ty, dst_f.ty); - assert_eq!(result, None); - result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); - } - let (lldata, llextra) = result.unwrap(); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(&dst_layout, 0, true)), - bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(&dst_layout, 1, true))) - } - _ => bug!("unsize_thin_ptr: called on bad types"), - } -} - -/// Coerce `src`, which is a reference to a value of type `src_ty`, -/// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - src: PlaceRef<'tcx, >::Value>, - dst: PlaceRef<'tcx, >::Value> -) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> -{ - let src_ty = src.layout.ty; - let dst_ty = dst.layout.ty; - let coerce_ptr = || { - let (base, info) = match bx.load_ref(&src).val { - OperandValue::Pair(base, info) => { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR); - (bx.pointercast(base, bx.cx().backend_type(&thin_ptr)), info) - } - OperandValue::Immediate(base) => { - unsize_thin_ptr(bx, base, src_ty, dst_ty) - } - OperandValue::Ref(..) 
=> bug!() - }; - OperandValue::Pair(base, info).store(bx, dst); - }; - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(..), &ty::Ref(..)) | - (&ty::Ref(..), &ty::RawPtr(..)) | - (&ty::RawPtr(..), &ty::RawPtr(..)) => { - coerce_ptr() - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - coerce_ptr() - } - - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - for i in 0..def_a.variants[0].fields.len() { - let src_f = src.project_field(bx, i); - let dst_f = dst.project_field(bx, i); - - if dst_f.layout.is_zst() { - continue; - } - - if src_f.layout.ty == dst_f.layout.ty { - memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, - src_f.align.min(dst_f.align), MemFlags::empty()); - } else { - coerce_unsized_into(bx, src_f, dst_f); - } - } - } - _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", - src_ty, - dst_ty), - } -} - -pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - op: hir::BinOpKind, - lhs: >::Value, - rhs: >::Value -) -> >::Value { - cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) -} - -fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - op: hir::BinOpKind, - lhs: >::Value, - rhs: >::Value, - trunc: F, - zext: G -) -> >::Value - where F: FnOnce( - >::Value, - >::Type - ) -> >::Value, - G: FnOnce( - >::Value, - >::Type - ) -> >::Value -{ - // Shifts may have any size int on the rhs - if op.is_shift() { - let mut rhs_llty = bx.cx().val_ty(rhs); - let mut lhs_llty = bx.cx().val_ty(lhs); - if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { - rhs_llty = bx.cx().element_type(rhs_llty) - } - if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { - lhs_llty = bx.cx().element_type(lhs_llty) - } - let rhs_sz = bx.cx().int_width(rhs_llty); - let lhs_sz = bx.cx().int_width(lhs_llty); - if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) - } else if lhs_sz > rhs_sz { - // FIXME 
(#1877: If in the future shifting by negative - // values is no longer undefined then this is wrong. - zext(rhs, lhs_llty) - } else { - rhs - } - } else { - rhs - } -} - -/// Returns whether this session's target will use SEH-based unwinding. -/// -/// This is only true for MSVC targets, and even then the 64-bit MSVC target -/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as -/// 64-bit MinGW) instead of "full SEH". -pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.target.options.is_like_msvc -} - -pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( - bx: &Bx, - val: >::Value -) { - let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); - bx.call(assume_intrinsic, &[val], None); -} - -pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( - bx: &Bx, - val: >::Value -) -> >::Value { - if bx.cx().val_ty(val) == bx.cx().type_i1() { - bx.zext(val, bx.cx().type_i8()) - } else { - val - } -} - -pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - val: >::Value, - layout: layout::TyLayout, -) -> >::Value { - if let layout::Abi::Scalar(ref scalar) = layout.abi { - return to_immediate_scalar(bx, val, scalar); - } - val -} - -pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - val: >::Value, - scalar: &layout::Scalar, -) -> >::Value { - if scalar.is_bool() { - return bx.trunc(val, bx.cx().type_i1()); - } - val -} - -pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - dst: >::Value, - src: >::Value, - layout: TyLayout<'tcx>, - align: Align, - flags: MemFlags, -) { - let size = layout.size.bytes(); - if size == 0 { - return; - } - - bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); -} - -pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - cx: &'a Bx::CodegenCx, - instance: Instance<'tcx> -) where 
&'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { - let _s = if cx.sess().codegen_stats() { - let mut instance_name = String::new(); - DefPathBasedNames::new(*cx.tcx(), true, true) - .push_def_path(instance.def_id(), &mut instance_name); - Some(StatRecorder::new(cx, instance_name)) - } else { - None - }; - - // this is an info! to allow collecting monomorphization statistics - // and to allow finding the last function before LLVM aborts from - // release builds. - info!("codegen_instance({})", instance); - - let fn_ty = instance.ty(*cx.tcx()); - let sig = common::ty_fn_sig(cx, fn_ty); - let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - - let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(|| - bug!("Instance `{:?}` not already declared", instance)); - - cx.stats().borrow_mut().n_closures += 1; - - let mir = cx.tcx().instance_mir(instance.def); - mir::codegen_mir::<'a, 'll, 'tcx, Bx>( - cx, lldecl, &mir, instance, sig - ); -} - -pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { - let sect = match attrs.link_section { - Some(name) => name, - None => return, - }; - unsafe { - let buf = SmallCStr::new(§.as_str()); - llvm::LLVMSetSection(llval, buf.as_ptr()); - } -} - -/// Create the `main` function which will initialize the rust runtime and call -/// users main function. -fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - cx: &'a Bx::CodegenCx -) { - let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { - Some((id, span, _)) => { - (cx.tcx().hir.local_def_id(id), span) - } - None => return, - }; - - let instance = Instance::mono(*cx.tcx(), main_def_id); - - if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { - // We want to create the wrapper in the same codegen unit as Rust's main - // function. 
- return; - } - - let main_llfn = cx.get_fn(instance); - - let et = cx.sess().entry_fn.get().map(|e| e.2); - match et { - Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), - Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), - None => {} // Do nothing. - } - - fn create_entry_fn<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - cx: &'a Bx::CodegenCx, - sp: Span, - rust_main: >::Value, - rust_main_def_id: DefId, - use_start_lang_item: bool, - ) { - let llfty = - cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); - - let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); - // Given that `main()` has no arguments, - // then its return type cannot have - // late-bound regions, since late-bound - // regions must appear in the argument - // listing. - let main_ret_ty = cx.tcx().erase_regions( - &main_ret_ty.no_late_bound_regions().unwrap(), - ); - - if cx.get_defined_value("main").is_some() { - // FIXME: We should be smart and show a better diagnostic here. - cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") - .help("did you use #[no_mangle] on `fn main`? 
Use #[start] instead") - .emit(); - cx.sess().abort_if_errors(); - bug!(); - } - let llfn = cx.declare_cfn("main", llfty); - - // `main` should respect same config for frame pointer elimination as rest of code - cx.set_frame_pointer_elimination(llfn); - cx.apply_target_cpu_attr(llfn); - - let bx = Bx::new_block(&cx, llfn, "top"); - - bx.insert_reference_to_gdb_debug_scripts_section_global(); - - // Params from native main() used as args for rust start function - let param_argc = cx.get_param(llfn, 0); - let param_argv = cx.get_param(llfn, 1); - let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); - let arg_argv = param_argv; - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); - let start_fn = callee::resolve_and_get_fn( - cx, - start_def_id, - cx.tcx().intern_substs(&[main_ret_ty.into()]), - ); - (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), - arg_argc, arg_argv]) - } else { - debug!("using user-defined start fn"); - (rust_main, vec![arg_argc, arg_argv]) - }; - - let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, cx.type_int(), true)); - } -} pub(crate) fn write_metadata<'a, 'gcx>( tcx: TyCtxt<'a, 'gcx, 'gcx>, @@ -682,490 +182,6 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { } } -fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - cgu: &CodegenUnit<'tcx>) - -> CguReuse { - if !tcx.dep_graph.is_fully_enabled() { - return CguReuse::No - } - - let work_product_id = &cgu.work_product_id(); - if tcx.dep_graph.previous_work_product(work_product_id).is_none() { - // We don't have anything cached for this CGU. This can happen - // if the CGU did not exist in the previous session. - return CguReuse::No - } - - // Try to mark the CGU as green. If it we can do so, it means that nothing - // affecting the LLVM module has changed and we can re-use a cached version. 
- // If we compile with any kind of LTO, this means we can re-use the bitcode - // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only - // know that later). If we are not doing LTO, there is only one optimized - // version of each module, so we re-use that. - let dep_node = cgu.codegen_dep_node(tcx); - assert!(!tcx.dep_graph.dep_node_exists(&dep_node), - "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", - cgu.name()); - - if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { - // We can re-use either the pre- or the post-thinlto state - if tcx.sess.lto() != Lto::No { - CguReuse::PreLto - } else { - CguReuse::PostLto - } - } else { - CguReuse::No - } -} - -pub fn codegen_crate( - backend: B, - tcx: TyCtxt<'ll, 'tcx, 'tcx>, - rx: mpsc::Receiver> -) -> B::OngoingCodegen { - - check_for_rustc_errors_attr(tcx); - - if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { - if backend.thin_lto_available() { - tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); - } - } - - if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || - !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && - backend.pgo_available() - { - tcx.sess.fatal("this compiler's LLVM does not support PGO"); - } - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); - - // Codegen the metadata. 
- tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); - - let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("metadata")).as_str() - .to_string(); - let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); - let metadata = time(tcx.sess, "write metadata", || { - backend.write_metadata(tcx, &metadata_llvm_module) - }); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); - - let metadata_module = ModuleCodegen { - name: metadata_cgu_name, - module_llvm: metadata_llvm_module, - kind: ModuleKind::Metadata, - }; - - let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { - Some(time_graph::TimeGraph::new()) - } else { - None - }; - - // Skip crate items and just output metadata in -Z no-codegen mode. - if tcx.sess.opts.debugging_opts.no_codegen || - !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = backend.start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - 1); - - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); - backend.codegen_finished(&ongoing_codegen, tcx); - - assert_and_save_dep_graph(tcx); - - backend.check_for_errors(&ongoing_codegen, tcx.sess); - - return ongoing_codegen; - } - - // Run the monomorphization collector and partition the collected items into - // codegen units. - let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; - let codegen_units = (*codegen_units).clone(); - - // Force all codegen_unit queries so they are already either red or green - // when compile_codegen_unit accesses them. We are not able to re-execute - // the codegen_unit query from just the DepNode, so an unknown color would - // lead to having to re-execute compile_codegen_unit, possibly - // unnecessarily. 
- if tcx.dep_graph.is_fully_enabled() { - for cgu in &codegen_units { - tcx.codegen_unit(cgu.name().clone()); - } - } - - let ongoing_codegen = backend.start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - codegen_units.len()); - - // Codegen an allocator shim, if necessary. - // - // If the crate doesn't have an `allocator_kind` set then there's definitely - // no shim to generate. Otherwise we also check our dependency graph for all - // our output crate types. If anything there looks like its a `Dynamic` - // linkage, then it's already got an allocator shim and we'll be using that - // one instead. If nothing exists then it's our job to generate the - // allocator! - let any_dynamic_crate = tcx.sess.dependency_formats.borrow() - .iter() - .any(|(_, list)| { - use rustc::middle::dependency_format::Linkage; - list.iter().any(|&linkage| linkage == Linkage::Dynamic) - }); - let allocator_module = if any_dynamic_crate { - None - } else if let Some(kind) = *tcx.sess.allocator_kind.get() { - let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("allocator")).as_str() - .to_string(); - let modules = backend.new_metadata(tcx.sess, &llmod_id); - time(tcx.sess, "write allocator module", || { - backend.codegen_allocator(tcx, &modules, kind) - }); - - Some(ModuleCodegen { - name: llmod_id, - module_llvm: modules, - kind: ModuleKind::Allocator, - }) - } else { - None - }; - - if let Some(allocator_module) = allocator_module { - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module); - } - - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); - - // We sort the codegen units by size. This way we can schedule work for LLVM - // a bit more efficiently. 
- let codegen_units = { - let mut codegen_units = codegen_units; - codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); - codegen_units - }; - - let mut total_codegen_time = Duration::new(0, 0); - let mut all_stats = Stats::default(); - - for cgu in codegen_units.into_iter() { - backend.wait_for_signal_to_codegen_item(&ongoing_codegen); - backend.check_for_errors(&ongoing_codegen, tcx.sess); - - let cgu_reuse = determine_cgu_reuse(tcx, &cgu); - tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); - - match cgu_reuse { - CguReuse::No => { - let _timing_guard = time_graph.as_ref().map(|time_graph| { - time_graph.start(write::CODEGEN_WORKER_TIMELINE, - write::CODEGEN_WORK_PACKAGE_KIND, - &format!("codegen {}", cgu.name())) - }); - let start_time = Instant::now(); - let stats = backend.compile_codegen_unit(tcx, *cgu.name()); - all_stats.extend(stats); - total_codegen_time += start_time.elapsed(); - false - } - CguReuse::PreLto => { - write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - CguReuse::PostLto => { - write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - }; - } - - backend.codegen_finished(&ongoing_codegen, tcx); - - // Since the main thread is sometimes blocked during codegen, we keep track - // -Ztime-passes output manually. 
- print_time_passes_entry(tcx.sess.time_passes(), - "codegen to LLVM IR", - total_codegen_time); - - rustc_incremental::assert_module_sources::assert_module_sources(tcx); - - symbol_names_test::report_symbol_names(tcx); - - if tcx.sess.codegen_stats() { - println!("--- codegen stats ---"); - println!("n_glues_created: {}", all_stats.n_glues_created); - println!("n_null_glues: {}", all_stats.n_null_glues); - println!("n_real_glues: {}", all_stats.n_real_glues); - - println!("n_fns: {}", all_stats.n_fns); - println!("n_inlines: {}", all_stats.n_inlines); - println!("n_closures: {}", all_stats.n_closures); - println!("fn stats:"); - all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); - for &(ref name, insns) in all_stats.fn_stats.iter() { - println!("{} insns, {}", insns, *name); - } - } - - if tcx.sess.count_llvm_insns() { - for (k, v) in all_stats.llvm_insns.iter() { - println!("{:7} {}", *v, *k); - } - } - - backend.check_for_errors(&ongoing_codegen, tcx.sess); - - assert_and_save_dep_graph(tcx); - ongoing_codegen -} - -fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { - time(tcx.sess, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(tcx.sess, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx)); -} - -fn collect_and_partition_mono_items<'ll, 'tcx>( - tcx: TyCtxt<'ll, 'tcx, 'tcx>, - cnum: CrateNum, -) -> (Arc, Arc>>>) -{ - assert_eq!(cnum, LOCAL_CRATE); - - let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { - Some(ref s) => { - let mode_string = s.to_lowercase(); - let mode_string = mode_string.trim(); - if mode_string == "eager" { - MonoItemCollectionMode::Eager - } else { - if mode_string != "lazy" { - let message = format!("Unknown codegen-item collection mode '{}'. 
\ - Falling back to 'lazy' mode.", - mode_string); - tcx.sess.warn(&message); - } - - MonoItemCollectionMode::Lazy - } - } - None => { - if tcx.sess.opts.cg.link_dead_code { - MonoItemCollectionMode::Eager - } else { - MonoItemCollectionMode::Lazy - } - } - }; - - let (items, inlining_map) = - time(tcx.sess, "monomorphization collection", || { - collector::collect_crate_mono_items(tcx, collection_mode) - }); - - tcx.sess.abort_if_errors(); - - ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); - - let strategy = if tcx.sess.opts.incremental.is_some() { - PartitioningStrategy::PerModule - } else { - PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) - }; - - let codegen_units = time(tcx.sess, "codegen unit partitioning", || { - partitioning::partition(tcx, - items.iter().cloned(), - strategy, - &inlining_map) - .into_iter() - .map(Arc::new) - .collect::>() - }); - - let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { - match *mono_item { - MonoItem::Fn(ref instance) => Some(instance.def_id()), - MonoItem::Static(def_id) => Some(def_id), - _ => None, - } - }).collect(); - - if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { - let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); - - for cgu in &codegen_units { - for (&mono_item, &linkage) in cgu.items() { - item_to_cgus.entry(mono_item) - .or_default() - .push((cgu.name().clone(), linkage)); - } - } - - let mut item_keys: Vec<_> = items - .iter() - .map(|i| { - let mut output = i.to_string(tcx); - output.push_str(" @@"); - let mut empty = Vec::new(); - let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); - cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); - cgus.dedup(); - for &(ref cgu_name, (linkage, _)) in cgus.iter() { - output.push_str(" "); - output.push_str(&cgu_name.as_str()); - - let linkage_abbrev = match linkage { - Linkage::External => "External", - Linkage::AvailableExternally => "Available", - Linkage::LinkOnceAny 
=> "OnceAny", - Linkage::LinkOnceODR => "OnceODR", - Linkage::WeakAny => "WeakAny", - Linkage::WeakODR => "WeakODR", - Linkage::Appending => "Appending", - Linkage::Internal => "Internal", - Linkage::Private => "Private", - Linkage::ExternalWeak => "ExternalWeak", - Linkage::Common => "Common", - }; - - output.push_str("["); - output.push_str(linkage_abbrev); - output.push_str("]"); - } - output - }) - .collect(); - - item_keys.sort(); - - for item in item_keys { - println!("MONO_ITEM {}", item); - } - } - - (Arc::new(mono_items), Arc::new(codegen_units)) -} - -impl CrateInfo { - pub fn new(tcx: TyCtxt) -> CrateInfo { - let mut info = CrateInfo { - panic_runtime: None, - compiler_builtins: None, - profiler_runtime: None, - sanitizer_runtime: None, - is_no_builtins: Default::default(), - native_libraries: Default::default(), - used_libraries: tcx.native_libraries(LOCAL_CRATE), - link_args: tcx.link_args(LOCAL_CRATE), - crate_name: Default::default(), - used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), - used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), - used_crate_source: Default::default(), - wasm_imports: Default::default(), - lang_item_to_crate: Default::default(), - missing_lang_items: Default::default(), - }; - let lang_items = tcx.lang_items(); - - let load_wasm_items = tcx.sess.crate_types.borrow() - .iter() - .any(|c| *c != config::CrateType::Rlib) && - tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; - - if load_wasm_items { - info.load_wasm_imports(tcx, LOCAL_CRATE); - } - - let crates = tcx.crates(); - - let n_crates = crates.len(); - info.native_libraries.reserve(n_crates); - info.crate_name.reserve(n_crates); - info.used_crate_source.reserve(n_crates); - info.missing_lang_items.reserve(n_crates); - - for &cnum in crates.iter() { - info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); - info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); - 
info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); - if tcx.is_panic_runtime(cnum) { - info.panic_runtime = Some(cnum); - } - if tcx.is_compiler_builtins(cnum) { - info.compiler_builtins = Some(cnum); - } - if tcx.is_profiler_runtime(cnum) { - info.profiler_runtime = Some(cnum); - } - if tcx.is_sanitizer_runtime(cnum) { - info.sanitizer_runtime = Some(cnum); - } - if tcx.is_no_builtins(cnum) { - info.is_no_builtins.insert(cnum); - } - if load_wasm_items { - info.load_wasm_imports(tcx, cnum); - } - let missing = tcx.missing_lang_items(cnum); - for &item in missing.iter() { - if let Ok(id) = lang_items.require(item) { - info.lang_item_to_crate.insert(item, id.krate); - } - } - - // No need to look for lang items that are whitelisted and don't - // actually need to exist. - let missing = missing.iter() - .cloned() - .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) - .collect(); - info.missing_lang_items.insert(cnum, missing); - } - - return info - } - - fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { - self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { - let instance = Instance::mono(tcx, id); - let import_name = tcx.symbol_name(instance); - - (import_name.to_string(), module.clone()) - })); - } -} - -fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { - let (all_mono_items, _) = - tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all_mono_items.contains(&id) -} - pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { @@ -1241,52 +257,15 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, } } -pub fn provide(providers: &mut Providers) { - providers.collect_and_partition_mono_items = - collect_and_partition_mono_items; - - providers.is_codegened_item = is_codegened_item; - - providers.codegen_unit = |tcx, name| { - let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all.iter() - .find(|cgu| *cgu.name() == 
name) - .cloned() - .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) - }; - - provide_extern(providers); -} - -pub fn provide_extern(providers: &mut Providers) { - providers.dllimport_foreign_items = |tcx, krate| { - let module_map = tcx.foreign_modules(krate); - let module_map = module_map.iter() - .map(|lib| (lib.def_id, lib)) - .collect::>(); - - let dllimports = tcx.native_libraries(krate) - .iter() - .filter(|lib| { - if lib.kind != cstore::NativeLibraryKind::NativeUnknown { - return false - } - let cfg = match lib.cfg { - Some(ref cfg) => cfg, - None => return true, - }; - attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) - }) - .filter_map(|lib| lib.foreign_module) - .map(|id| &module_map[&id]) - .flat_map(|module| module.foreign_items.iter().cloned()) - .collect(); - Lrc::new(dllimports) - }; - - providers.is_dllimport_foreign_item = |tcx, def_id| { - tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) +pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { + let sect = match attrs.link_section { + Some(name) => name, + None => return, }; + unsafe { + let buf = SmallCStr::new(§.as_str()); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } } pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 45ce932597fa8..24564e7628fce 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -52,14 +52,6 @@ fn noname() -> *const c_char { &CNULL } -bitflags! 
{ - pub struct MemFlags: u8 { - const VOLATILE = 1 << 0; - const NONTEMPORAL = 1 << 1; - const UNALIGNED = 1 << 2; - } -} - impl HasCodegen<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { type CodegenCx = CodegenCx<'ll, 'tcx, &'ll Value>; } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 5df3440abff40..f02544a46413a 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -203,18 +203,3 @@ pub fn get_fn( llfn } - -pub fn resolve_and_get_fn<'ll, 'tcx: 'll, Cx : CodegenMethods<'ll, 'tcx>>( - cx: &Cx, - def_id: DefId, - substs: &'tcx Substs<'tcx>, -) -> Cx::Value { - cx.get_fn( - ty::Instance::resolve( - *cx.tcx(), - ty::ParamEnv::reveal_all(), - def_id, - substs - ).unwrap() - ) -} diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index a86e7234b6c20..ae2f6f2a184f6 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -370,137 +370,3 @@ pub fn struct_in_context( fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { ((hi as u128) << 64) | (lo as u128) } - -pub fn langcall(tcx: TyCtxt, - span: Option, - msg: &str, - li: LangItem) - -> DefId { - tcx.lang_items().require(li).unwrap_or_else(|s| { - let msg = format!("{} {}", msg, s); - match span { - Some(span) => tcx.sess.span_fatal(span, &msg[..]), - None => tcx.sess.fatal(&msg[..]), - } - }) -} - -// To avoid UB from LLVM, these two functions mask RHS with an -// appropriate mask unconditionally (i.e. the fallback behavior for -// all shifts). For 32- and 64-bit types, this matches the semantics -// of Java. (See related discussion on #1877 and #10183.) 
- -pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - lhs: >::Value, - rhs: >::Value -) -> >::Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - bx.shl(lhs, rhs) -} - -pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - lhs_t: Ty<'tcx>, - lhs: >::Value, - rhs: >::Value -) -> >::Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - let is_signed = lhs_t.is_signed(); - if is_signed { - bx.ashr(lhs, rhs) - } else { - bx.lshr(lhs, rhs) - } -} - -fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - rhs: >::Value -) -> >::Value { - let rhs_llty = bx.cx().val_ty(rhs); - bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) -} - -pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, - llty: >::Type, - mask_llty: >::Type, - invert: bool -) -> >::Value { - let kind = bx.cx().type_kind(llty); - match kind { - TypeKind::Integer => { - // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. - let val = bx.cx().int_width(llty) - 1; - if invert { - bx.cx().const_int(mask_llty, !val as i64) - } else { - bx.cx().const_uint(mask_llty, val) - } - }, - TypeKind::Vector => { - let mask = shift_mask_val( - bx, - bx.cx().element_type(llty), - bx.cx().element_type(mask_llty), - invert - ); - bx.vector_splat(bx.cx().vector_length(mask_llty), mask) - }, - _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), - } -} - -pub fn ty_fn_sig<'ll, 'tcx:'ll, Cx: CodegenMethods<'ll, 'tcx>>( - cx: &Cx, - ty: Ty<'tcx> -) -> ty::PolyFnSig<'tcx> { - match ty.sty { - ty::FnDef(..) | - // Shims currently have type FnPtr. Not sure this should remain. 
- ty::FnPtr(_) => ty.fn_sig(*cx.tcx()), - ty::Closure(def_id, substs) => { - let tcx = *cx.tcx(); - let sig = substs.closure_sig(def_id, tcx); - - let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); - sig.map_bound(|sig| tcx.mk_fn_sig( - iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), - sig.output(), - sig.variadic, - sig.unsafety, - sig.abi - )) - } - ty::Generator(def_id, substs, _) => { - let tcx = *cx.tcx(); - let sig = substs.poly_sig(def_id, tcx); - - let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); - let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); - - sig.map_bound(|sig| { - let state_did = tcx.lang_items().gen_state().unwrap(); - let state_adt_ref = tcx.adt_def(state_did); - let state_substs = tcx.intern_substs(&[ - sig.yield_ty.into(), - sig.return_ty.into(), - ]); - let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); - - tcx.mk_fn_sig(iter::once(env_ty), - ret_ty, - false, - hir::Unsafety::Normal, - Abi::Rust - ) - }) - } - _ => bug!("unexpected type {:?} to ty_fn_sig", ty) - } -} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index bfa9e40f2190c..18d78591e8122 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -32,6 +32,61 @@ use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; use std::ffi::{CStr, CString}; +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocation) -> &'ll Value { + let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); + let layout = cx.data_layout(); + let pointer_size = layout.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, alloc_id) in alloc.relocations.iter() { + let offset = offset.bytes(); + assert_eq!(offset as usize as u64, offset); + let offset = offset as usize; + if offset > next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset])); + } + let ptr_offset = read_target_uint( + layout.endian, + 
&alloc.bytes[offset..(offset + pointer_size)], + ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; + llvals.push(cx.scalar_to_backend( + Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(), + &layout::Scalar { + value: layout::Primitive::Pointer, + valid_range: 0..=!0 + }, + cx.type_i8p() + )); + next_offset = offset + pointer_size; + } + if alloc.bytes.len() >= next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..])); + } + + cx.const_struct(&llvals, true) +} + +pub fn codegen_static_initializer( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + def_id: DefId, +) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { + let instance = ty::Instance::mono(cx.tcx, def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + let param_env = ty::ParamEnv::reveal_all(); + let static_ = cx.tcx.const_eval(param_env.and(cid))?; + + let alloc = match static_.val { + ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, + _ => bug!("static const eval returned {:#?}", static_), + }; + Ok((const_alloc_to_llvm(cx, alloc), alloc)) +} + + + fn set_global_alignment(cx: &CodegenCx<'ll, '_, &'ll Value>, gv: &'ll Value, mut align: Align) { diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 0de2193edfb98..42b65a4c95688 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -446,6 +446,9 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { attributes::apply_target_cpu_attr(self, llfn) } + fn env_alloca_allowed(&self) { + unsafe { llvm::LLVMRustVersionMajor() < 6 } + } fn create_used_variable(&self) { let name = const_cstr!("llvm.used"); diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 8f70997eb2500..d7180daeced74 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ 
-27,21 +27,6 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use syntax_pos::BytePos; -#[derive(Clone, Copy, Debug)] -pub struct MirDebugScope { - pub scope_metadata: Option, - // Start and end offsets of the file to which this DIScope belongs. - // These are used to quickly determine whether some span refers to the same file. - pub file_start_pos: BytePos, - pub file_end_pos: BytePos, -} - -impl MirDebugScope { - pub fn is_valid(&self) -> bool { - self.scope_metadata.is_some() - } -} - /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes( diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 096a22e61dde1..2c97de1b95f58 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -111,53 +111,6 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { } } -pub enum FunctionDebugContext<'ll> { - RegularContext(FunctionDebugContextData<'ll>), - DebugInfoDisabled, - FunctionWithoutDebugInfo, -} - -impl FunctionDebugContext<'ll> { - pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> { - match *self { - FunctionDebugContext::RegularContext(ref data) => data, - FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); - } - } - } - - fn debuginfo_disabled_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" - } - - fn should_be_ignored_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext for function that should be \ - ignored by debug info!" 
- } -} - -pub struct FunctionDebugContextData<'ll> { - fn_metadata: &'ll DISubprogram, - source_locations_enabled: Cell, - pub defining_crate: CrateNum, -} - -pub enum VariableAccess<'a, V> { - // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: V }, - // The llptr given is an alloca containing the start of some pointer chain - // leading to the variable's content. - IndirectVariable { alloca: V, address_operations: &'a [i64] } -} - -pub enum VariableKind { - ArgumentVariable(usize /*index*/), - LocalVariable, -} /// Create any deferred debug metadata nodes pub fn finalize(cx: &CodegenCx<'ll, '_, &'ll Value>) { @@ -596,4 +549,13 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V fn debuginfo_finalize(&self) { finalize(self) } + + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> &[i64] { + unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlusUconst(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] + }; + } } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 42271d37958c1..17c93288d504b 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -51,18 +51,6 @@ pub fn set_source_location( set_debug_location(bx, dbg_loc); } -/// Enables emitting source locations for the given functions. -/// -/// Since we don't want source locations to be emitted for the function prelude, -/// they are disabled when beginning to codegen a new function. This functions -/// switches source location emitting on and must therefore be called before the -/// first real statement/expression of the function is codegened. 
-pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) { - if let FunctionDebugContext::RegularContext(ref data) = *dbg_context { - data.source_locations_enabled.set(true); - } -} - #[derive(Copy, Clone, PartialEq)] pub enum InternalDebugLocation<'ll> { diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs index 9de2c79f3bcf7..e41ab4576073c 100644 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ b/src/librustc_codegen_llvm/interfaces/mod.rs @@ -8,27 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -mod builder; -mod consts; -mod type_; -mod intrinsic; -mod debuginfo; -mod abi; -mod asm; - -pub use self::builder::{BuilderMethods, HasCodegen}; pub use rustc_codegen_ssa::interfaces::{Backend, BackendMethods, CodegenObject, MiscMethods, - StaticMethods, DeclareMethods, PreDefineMethods}; -pub use self::consts::ConstMethods; -pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, - LayoutTypeMethods, ArgTypeMethods}; -pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; -pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; -pub use self::abi::{AbiMethods, AbiBuilderMethods}; -pub use self::asm::{AsmMethods, AsmBuilderMethods}; - -pub trait CodegenMethods<'ll, 'tcx: 'll> : - Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + - StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + - IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + - PreDefineMethods<'ll, 'tcx> {} + StaticMethods, DeclareMethods, PreDefineMethods, BuilderMethods, HasCodegen, ConstMethods, + TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods, + IntrinsicCallMethods, IntrinsicDeclarationMethods, DebugInfoMethods, DebugInfoBuilderMethods, + AbiMethods, AbiBuilderMethods, AsmMethods, 
AsmBuilderMethods, CodegenMethods}; diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 1afc37e40f49f..d3c7f18fe598f 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -39,7 +39,6 @@ use back::write::create_target_machine; use syntax_pos::symbol::Symbol; -#[macro_use] extern crate bitflags; extern crate flate2; extern crate libc; #[macro_use] extern crate rustc; @@ -181,6 +180,12 @@ impl BackendMethods for LlvmCodegenBackend { ) { codegen.submit_pre_codegened_module_to_llvm(tcx, module) } + fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) { + write::submit_pre_lto_module_to_llvm(tcx, module) + } + fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) { + write::submit_post_lto_module_to_llvm(tcx, module) + } fn codegen_finished(&self, codegen : &OngoingCodegen, tcx: TyCtxt) { codegen.codegen_finished(tcx) } @@ -385,24 +390,4 @@ struct CodegenResults { linker_info: back::linker::LinkerInfo, crate_info: CrateInfo, } - -/// Misc info we load from metadata to persist beyond the tcx -struct CrateInfo { - panic_runtime: Option, - compiler_builtins: Option, - profiler_runtime: Option, - sanitizer_runtime: Option, - is_no_builtins: FxHashSet, - native_libraries: FxHashMap>>, - crate_name: FxHashMap, - used_libraries: Lrc>, - link_args: Lrc>, - used_crate_source: FxHashMap>, - used_crates_static: Vec<(CrateNum, LibSource)>, - used_crates_dynamic: Vec<(CrateNum, LibSource)>, - wasm_imports: FxHashMap, - lang_item_to_crate: FxHashMap, - missing_lang_items: FxHashMap>, -} - __build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/mono-item.rs b/src/librustc_codegen_llvm/mono-item.rs new file mode 100644 index 0000000000000..f103e32fe80a3 --- /dev/null +++ b/src/librustc_codegen_llvm/mono-item.rs @@ -0,0 +1,99 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use attributes; +use base; +use context::CodegenCx; +use llvm; +use monomorphize::Instance; +use type_of::LayoutLlvmExt; +use rustc::hir; +use rustc::hir::def::Def; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty::{TypeFoldable, Ty}; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; +use std::fmt; +use value::Value; +use interfaces::*; + +pub use rustc::mir::mono::MonoItem; + +pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; + + +impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx> + for MonoItem<'tcx> {} + +impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn predefine_static(&self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + + let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { + self.sess().span_fatal(self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); + + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); + } + + self.instances.borrow_mut().insert(instance, g); + } + + fn predefine_fn(&self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_ty = instance.ty(self.tcx); + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = 
self.declare_fn(symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, lldecl); + } + + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal && linkage != Linkage::Private && + self.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + } + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + } + } + + debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); + if instance.def.is_inline(self.tcx) { + attributes::inline(self, lldecl, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); + + self.instances.borrow_mut().insert(instance, lldecl); + } +} diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 69b5ea188f6d7..e620a09b62bd6 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -410,6 +410,12 @@ impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool { ty.is_llvm_immediate() } + fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool { + ty.is_llvm_scalar_pair() + } + fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64 { + ty.llvm_field_index() + } fn scalar_pair_element_backend_type<'a>( &self, ty: &TyLayout<'tcx>, diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml index 8fdf35f96dc87..16b00802cd1cb 100644 --- 
a/src/librustc_codegen_ssa/Cargo.toml +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -10,9 +10,15 @@ crate-type = ["dylib"] test = false [dependencies] +bitflags = "1.0" +log = "0.4" + syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } rustc = { path = "../librustc" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } +rustc_apfloat = { path = "../librustc_apfloat" } rustc_mir = { path = "../librustc_mir" } +rustc_codegen_utils = { path = "../librustc_codegen_utils" } +rustc_incremental = { path = "../librustc_incremental" } diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs new file mode 100644 index 0000000000000..48ef2d6ff8829 --- /dev/null +++ b/src/librustc_codegen_ssa/base.rs @@ -0,0 +1,1103 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Codegen the completed AST to the LLVM IR. +//! +//! Some functions here, such as codegen_block and codegen_expr, return a value -- +//! the result of the codegen to LLVM -- while others, such as codegen_fn +//! and mono_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about codegen: +//! +//! * There's no way to find out the Ty type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, +//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, +//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen}; + +use rustc::dep_graph::cgu_reuse_tracker::CguReuse; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::weak_lang_items; +use rustc::mir::mono::{Linkage, Stats, CodegenUnitNameBuilder}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; +use rustc::ty::query::Providers; +use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::util::profiling::ProfileCategory; +use rustc::session::config::{self, EntryFnType, Lto}; +use rustc::session::Session; +use mir::place::PlaceRef; +use {MemFlags, CrateInfo}; +use callee; +use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; +use rustc_mir::monomorphize::item::DefPathBasedNames; +use common::{self, RealPredicate, TypeKind, IntPredicate}; +use meth; +use mir; +use rustc::util::time_graph; +use rustc_mir::monomorphize::Instance; +use rustc_mir::monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; +use mono_item::{MonoItem, BaseMonoItemExt}; +use rustc::util::nodemap::{FxHashMap, FxHashSet, DefIdSet}; +use rustc_data_structures::sync::Lrc; +use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr}; +use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; + +use interfaces::*; + +use std::any::Any; +use std::sync::Arc; +use std::time::{Instant, Duration}; +use std::cmp; +use std::sync::mpsc; +use syntax_pos::Span; +use syntax::attr; +use rustc::hir; + +use mir::operand::OperandValue; + +use std::marker::PhantomData; + + +pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { + cx: &'a Cx, + name: Option, + istart: usize, + phantom: PhantomData<(&'ll (), &'tcx ())> +} + +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> { + pub fn new(cx: 
&'a Cx, name: String) -> Self { + let istart = cx.stats().borrow().n_llvm_insns; + StatRecorder { + cx, + name: Some(name), + istart, + phantom: PhantomData + } + } +} + +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Drop for + StatRecorder<'a, 'll, 'tcx, Cx> +{ + fn drop(&mut self) { + if self.cx.sess().codegen_stats() { + let mut stats = self.cx.stats().borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; + // Reset LLVM insn count to avoid compound costs. + stats.n_llvm_insns = self.istart; + } + } +} + +pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, + signed: bool) + -> IntPredicate { + match op { + hir::BinOpKind::Eq => IntPredicate::IntEQ, + hir::BinOpKind::Ne => IntPredicate::IntNE, + hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, + hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, + hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, + hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { + match op { + hir::BinOpKind::Eq => RealPredicate::RealOEQ, + hir::BinOpKind::Ne => RealPredicate::RealUNE, + hir::BinOpKind::Lt => RealPredicate::RealOLT, + hir::BinOpKind::Le => RealPredicate::RealOLE, + hir::BinOpKind::Gt => RealPredicate::RealOGT, + hir::BinOpKind::Ge => RealPredicate::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs: >::Value, + rhs: >::Value, + t: Ty<'tcx>, + ret_ty: >::Type, + op: hir::BinOpKind +) -> >::Value { + let signed = 
match t.sty { + ty::Float(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); + }, + ty::Uint(_) => false, + ty::Int(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. +/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be derived +/// from the old one. +pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( + cx: &'a Cx, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option, +) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::Array(_, len), &ty::Slice(_)) => { + cx.const_usize(len.unwrap_usize(*cx.tcx())) + } + (&ty::Dynamic(..), &ty::Dynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::Dynamic(ref data, ..)) => { + let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) + .field(cx, FAT_PTR_EXTRA); + cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), + cx.backend_type(&vtable_ptr)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
+pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + src: >::Value, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (>::Value, >::Value) where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(_, a, _), + &ty::Ref(_, b, _)) | + (&ty::Ref(_, a, _), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bx.cx().layout_of(src_ty); + let dst_layout = bx.cx().layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bx.cx(), i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bx.cx(), i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
+ (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(&dst_layout, 0, true)), + bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(&dst_layout, 1, true))) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + src: PlaceRef<'tcx, >::Value>, + dst: PlaceRef<'tcx, >::Value> +) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; + let coerce_ptr = || { + let (base, info) = match bx.load_ref(&src).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); + (bx.pointercast(base, bx.cx().backend_type(&thin_ptr)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() + }; + OperandValue::Pair(base, info).store(bx, dst); + }; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(..), &ty::Ref(..)) | + (&ty::Ref(..), &ty::RawPtr(..)) | + (&ty::RawPtr(..), &ty::RawPtr(..)) => { + coerce_ptr() + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + coerce_ptr() + } + + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + for i in 0..def_a.variants[0].fields.len() { + let src_f = src.project_field(bx, i); + let dst_f = dst.project_field(bx, i); + + if dst_f.layout.is_zst() { + continue; + } + + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, + src_f.align.min(dst_f.align), MemFlags::empty()); + } else { + coerce_unsized_into(bx, src_f, dst_f); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + op: hir::BinOpKind, + lhs: >::Value, + rhs: >::Value +) -> >::Value { + cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) +} + +fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + op: hir::BinOpKind, + lhs: >::Value, + rhs: >::Value, + trunc: F, + zext: G +) -> >::Value + where F: FnOnce( + >::Value, + >::Type + ) -> >::Value, + G: FnOnce( + >::Value, + >::Type + ) -> >::Value +{ + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = bx.cx().val_ty(rhs); + let mut lhs_llty = bx.cx().val_ty(lhs); + if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.cx().element_type(rhs_llty) + } + if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.cx().element_type(lhs_llty) + } + let rhs_sz = bx.cx().int_width(rhs_llty); + let lhs_sz = bx.cx().int_width(lhs_llty); + if lhs_sz < rhs_sz { + trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME 
(#1877: If shifting by negative + // values becomes not undefined then this is wrong. + zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". +pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &Bx, + val: >::Value +) { + let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); + bx.call(assume_intrinsic, &[val], None); +} + +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &Bx, + val: >::Value +) -> >::Value { + if bx.cx().val_ty(val) == bx.cx().type_i1() { + bx.zext(val, bx.cx().type_i8()) + } else { + val + } +} + +pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + val: >::Value, + layout: layout::TyLayout, +) -> >::Value { + if let layout::Abi::Scalar(ref scalar) = layout.abi { + return to_immediate_scalar(bx, val, scalar); + } + val +} + +pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + val: >::Value, + scalar: &layout::Scalar, +) -> >::Value { + if scalar.is_bool() { + return bx.trunc(val, bx.cx().type_i1()); + } + val +} + +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + dst: >::Value, + src: >::Value, + layout: TyLayout<'tcx>, + align: Align, + flags: MemFlags, +) { + let size = layout.size.bytes(); + if size == 0 { + return; + } + + bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); +} + +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx> +) where &'a 
Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let _s = if cx.sess().codegen_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(*cx.tcx(), true, true) + .push_def_path(instance.def_id(), &mut instance_name); + Some(StatRecorder::new(cx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. + info!("codegen_instance({})", instance); + + let fn_ty = instance.ty(*cx.tcx()); + let sig = common::ty_fn_sig(cx, fn_ty); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + + let lldecl = match cx.instances().borrow().get(&instance) { + Some(&val) => val, + None => bug!("Instance `{:?}` not already declared", instance) + }; + + cx.stats().borrow_mut().n_closures += 1; + + let mir = cx.tcx().instance_mir(instance.def); + mir::codegen_mir::<'a, 'll, 'tcx, Bx>( + cx, lldecl, &mir, instance, sig + ); +} + +/// Create the `main` function which will initialize the rust runtime and call +/// users main function. +fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx +) { + let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { + Some((id, span, _)) => { + (cx.tcx().hir.local_def_id(id), span) + } + None => return, + }; + + let instance = Instance::mono(*cx.tcx(), main_def_id); + + if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. + return; + } + + let main_llfn = cx.get_fn(instance); + + let et = cx.sess().entry_fn.get().map(|e| e.2); + match et { + Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), + None => {} // Do nothing. 
+ } + + fn create_entry_fn<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + sp: Span, + rust_main: >::Value, + rust_main_def_id: DefId, + use_start_lang_item: bool, + ) { + let llfty = + cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); + + let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = cx.tcx().erase_regions( + &main_ret_ty.no_late_bound_regions().unwrap(), + ); + + if cx.get_defined_value("main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") + .emit(); + cx.sess().abort_if_errors(); + bug!(); + } + let llfn = cx.declare_cfn("main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + cx.set_frame_pointer_elimination(llfn); + cx.apply_target_cpu_attr(llfn); + + let bx = Bx::new_block(&cx, llfn, "top"); + + bx.insert_reference_to_gdb_debug_scripts_section_global(); + + // Params from native main() used as args for rust start function + let param_argc = cx.get_param(llfn, 0); + let param_argv = cx.get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); + let arg_argv = param_argv; + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); + let start_fn = callee::resolve_and_get_fn( + cx, + start_def_id, + cx.tcx().intern_substs(&[main_ret_ty.into()]), + ); + (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), + arg_argc, arg_argv]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![arg_argc, arg_argv]) + }; + + let result = bx.call(start_fn, 
&args, None); + bx.ret(bx.intcast(result, cx.type_int(), true)); + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); + + +pub fn codegen_crate( + backend: B, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + rx: mpsc::Receiver> +) -> B::OngoingCodegen { + + check_for_rustc_errors_attr(tcx); + + if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { + if backend.thin_lto_available() { + tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); + } + } + + if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || + !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && + backend.pgo_available() + { + tcx.sess.fatal("this compiler's LLVM does not support PGO"); + } + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); + + // Codegen the metadata. + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + + let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("metadata")).as_str() + .to_string(); + let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); + let metadata = time(tcx.sess, "write metadata", || { + backend.write_metadata(tcx, &metadata_llvm_module) + }); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + + let metadata_module = ModuleCodegen { + name: metadata_cgu_name, + module_llvm: metadata_llvm_module, + kind: ModuleKind::Metadata, + }; + + let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; + + // Skip crate items and just output metadata in -Z no-codegen mode. 
+ if tcx.sess.opts.debugging_opts.no_codegen || + !tcx.sess.opts.output_types.should_codegen() { + let ongoing_codegen = backend.start_async_codegen( + tcx, + time_graph.clone(), + metadata, + rx, + 1); + + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); + backend.codegen_finished(&ongoing_codegen, tcx); + + assert_and_save_dep_graph(tcx); + + backend.check_for_errors(&ongoing_codegen, tcx.sess); + + return ongoing_codegen; + } + + // Run the monomorphization collector and partition the collected items into + // codegen units. + let codegen_units = + tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. + if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); + } + } + + let ongoing_codegen = backend.start_async_codegen( + tcx, + time_graph.clone(), + metadata, + rx, + codegen_units.len()); + + // Codegen an allocator shim, if necessary. + // + // If the crate doesn't have an `allocator_kind` set then there's definitely + // no shim to generate. Otherwise we also check our dependency graph for all + // our output crate types. If anything there looks like its a `Dynamic` + // linkage, then it's already got an allocator shim and we'll be using that + // one instead. If nothing exists then it's our job to generate the + // allocator! 
+ let any_dynamic_crate = tcx.sess.dependency_formats.borrow() + .iter() + .any(|(_, list)| { + use rustc::middle::dependency_format::Linkage; + list.iter().any(|linkage| { + match linkage { + Linkage::Dynamic => true, + _ => false, + } + }) + }); + let allocator_module = if any_dynamic_crate { + None + } else if let Some(kind) = *tcx.sess.allocator_kind.get() { + let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("allocator")).as_str() + .to_string(); + let modules = backend.new_metadata(tcx.sess, &llmod_id); + time(tcx.sess, "write allocator module", || { + backend.codegen_allocator(tcx, &modules, kind) + }); + + Some(ModuleCodegen { + name: llmod_id, + module_llvm: modules, + kind: ModuleKind::Allocator, + }) + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module); + } + + backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. 
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + codegen_units + }; + + let mut total_codegen_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); + + for cgu in codegen_units.into_iter() { + backend.wait_for_signal_to_codegen_item(&ongoing_codegen); + backend.check_for_errors(&ongoing_codegen, tcx.sess); + + let cgu_reuse = determine_cgu_reuse(tcx, &cgu); + tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); + + match cgu_reuse { + CguReuse::No => { + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + let stats = backend.compile_codegen_unit(tcx, *cgu.name()); + all_stats.extend(stats); + total_codegen_time += start_time.elapsed(); + false + } + CguReuse::PreLto => { + backend.submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + CguReuse::PostLto => { + backend.submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + }; + } + + backend.codegen_finished(&ongoing_codegen, tcx); + + // Since the main thread is sometimes blocked during codegen, we keep track + // -Ztime-passes output manually. 
+ print_time_passes_entry(tcx.sess.time_passes(), + "codegen to LLVM IR", + total_codegen_time); + + rustc_incremental::assert_module_sources::assert_module_sources(tcx); + + symbol_names_test::report_symbol_names(tcx); + + if tcx.sess.codegen_stats() { + println!("--- codegen stats ---"); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); + + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); + println!("fn stats:"); + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); + } + } + + if tcx.sess.count_llvm_insns() { + for (k, v) in all_stats.llvm_insns.iter() { + println!("{:7} {}", *v, *k); + } + } + + backend.check_for_errors(&ongoing_codegen, tcx.sess); + + assert_and_save_dep_graph(tcx); + ongoing_codegen +} + +fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { + time(tcx.sess, + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess, + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx)); +} + +fn collect_and_partition_mono_items<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + cnum: CrateNum, +) -> (Arc, Arc>>>) +{ + assert_eq!(cnum, LOCAL_CRATE); + + let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!("Unknown codegen-item collection mode '{}'. 
\ + Falling back to 'lazy' mode.", + mode_string); + tcx.sess.warn(&message); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.opts.cg.link_dead_code { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = + time(tcx.sess, "monomorphization collection", || { + collector::collect_crate_mono_items(tcx, collection_mode) + }); + + tcx.sess.abort_if_errors(); + + ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); + + let strategy = if tcx.sess.opts.incremental.is_some() { + PartitioningStrategy::PerModule + } else { + PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) + }; + + let codegen_units = time(tcx.sess, "codegen unit partitioning", || { + partitioning::partition(tcx, + items.iter().cloned(), + strategy, + &inlining_map) + .into_iter() + .map(Arc::new) + .collect::>() + }); + + let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { + match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + } + }).collect(); + + if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = FxHashMap(); + + for cgu in &codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item) + .or_default() + .push((cgu.name().clone(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = i.to_string(tcx); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push_str(" "); + output.push_str(&cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => 
"OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push_str("["); + output.push_str(linkage_abbrev); + output.push_str("]"); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {}", item); + } + } + + (Arc::new(mono_items), Arc::new(codegen_units)) +} + + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: FxHashSet(), + native_libraries: FxHashMap(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: FxHashMap(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: FxHashMap(), + wasm_imports: FxHashMap(), + lang_item_to_crate: FxHashMap(), + missing_lang_items: FxHashMap(), + }; + let lang_items = tcx.lang_items(); + + let load_wasm_items = tcx.sess.crate_types.borrow() + .iter() + .any(|c| *c != config::CrateType::Rlib) && + tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; + + if load_wasm_items { + info.load_wasm_imports(tcx, LOCAL_CRATE); + } + + for &cnum in tcx.crates().iter() { + info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = 
Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + if load_wasm_items { + info.load_wasm_imports(tcx, cnum); + } + let missing = tcx.missing_lang_items(cnum); + for &item in missing.iter() { + if let Ok(id) = lang_items.require(item) { + info.lang_item_to_crate.insert(item, id.krate); + } + } + + // No need to look for lang items that are whitelisted and don't + // actually need to exist. + let missing = missing.iter() + .cloned() + .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) + .collect(); + info.missing_lang_items.insert(cnum, missing); + } + + return info + } + + fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { + for (&id, module) in tcx.wasm_import_module_map(cnum).iter() { + let instance = Instance::mono(tcx, id); + let import_name = tcx.symbol_name(instance); + self.wasm_imports.insert(import_name.to_string(), module.clone()); + } + } +} + +fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&id) +} + + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = + collect_and_partition_mono_items; + + providers.is_codegened_item = is_codegened_item; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all.iter() + .find(|cgu| *cgu.name() == name) + .cloned() + .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) + }; + + provide_extern(providers); +} + +pub fn provide_extern(providers: &mut Providers) { + providers.dllimport_foreign_items = |tcx, krate| { + let module_map = tcx.foreign_modules(krate); + let module_map = module_map.iter() + .map(|lib| (lib.def_id, lib)) + .collect::>(); + + let dllimports = tcx.native_libraries(krate) + .iter() + .filter(|lib| { + if lib.kind != 
cstore::NativeLibraryKind::NativeUnknown { + return false + } + let cfg = match lib.cfg { + Some(ref cfg) => cfg, + None => return true, + }; + attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) + }) + .filter_map(|lib| lib.foreign_module) + .map(|id| &module_map[&id]) + .flat_map(|module| module.foreign_items.iter().cloned()) + .collect(); + Lrc::new(dllimports) + }; + + providers.is_dllimport_foreign_item = |tcx, def_id| { + tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) + }; +} + + +fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: &CodegenUnit<'tcx>) + -> CguReuse { + if !tcx.dep_graph.is_fully_enabled() { + return CguReuse::No + } + + let work_product_id = &cgu.work_product_id(); + if tcx.dep_graph.previous_work_product(work_product_id).is_none() { + // We don't have anything cached for this CGU. This can happen + // if the CGU did not exist in the previous session. + return CguReuse::No + } + + // Try to mark the CGU as green. If it we can do so, it means that nothing + // affecting the LLVM module has changed and we can re-use a cached version. + // If we compile with any kind of LTO, this means we can re-use the bitcode + // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only + // know that later). If we are not doing LTO, there is only one optimized + // version of each module, so we re-use that. 
+ let dep_node = cgu.codegen_dep_node(tcx); + assert!(!tcx.dep_graph.dep_node_exists(&dep_node), + "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", + cgu.name()); + + if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { + // We can re-use either the pre- or the post-thinlto state + if tcx.sess.lto() != Lto::No { + CguReuse::PreLto + } else { + CguReuse::PostLto + } + } else { + CguReuse::No + } +} diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs new file mode 100644 index 0000000000000..a69058f2accf5 --- /dev/null +++ b/src/librustc_codegen_ssa/callee.rs @@ -0,0 +1,29 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use interfaces::*; +use rustc::ty; +use rustc::ty::subst::Substs; +use rustc::hir::def_id::DefId; + +pub fn resolve_and_get_fn<'ll, 'tcx: 'll, Cx : CodegenMethods<'ll, 'tcx>>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value { + cx.get_fn( + ty::Instance::resolve( + *cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index acd5bb3b93603..fa727589d235f 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -10,8 +10,19 @@ #![allow(non_camel_case_types, non_snake_case)] use rustc::ty::{self, Ty, TyCtxt}; -use syntax_pos::DUMMY_SP; -use interfaces::CodegenObject; +use syntax_pos::{DUMMY_SP, Span}; + +use rustc::hir::def_id::DefId; +use rustc::middle::lang_items::LangItem; +use base; +use interfaces::*; + +use rustc::hir; +use interfaces::BuilderMethods; + +use std::iter; + +use rustc_target::spec::abi::Abi; pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { @@ -188,3 +199,142 @@ mod temp_stable_hash_impls { } } } + + + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) 
+ +pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs: >::Value, + rhs: >::Value +) -> >::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + bx.shl(lhs, rhs) +} + +pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + lhs_t: Ty<'tcx>, + lhs: >::Value, + rhs: >::Value +) -> >::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + let is_signed = lhs_t.is_signed(); + if is_signed { + bx.ashr(lhs, rhs) + } else { + bx.lshr(lhs, rhs) + } +} + +fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + rhs: >::Value +) -> >::Value { + let rhs_llty = bx.cx().val_ty(rhs); + bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) +} + +pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &Bx, + llty: >::Type, + mask_llty: >::Type, + invert: bool +) -> >::Value { + let kind = bx.cx().type_kind(llty); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. + let val = bx.cx().int_width(llty) - 1; + if invert { + bx.cx().const_int(mask_llty, !val as i64) + } else { + bx.cx().const_uint(mask_llty, val) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val( + bx, + bx.cx().element_type(llty), + bx.cx().element_type(mask_llty), + invert + ); + bx.vector_splat(bx.cx().vector_length(mask_llty), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} + +pub fn ty_fn_sig<'ll, 'tcx:'ll, Cx: CodegenMethods<'ll, 'tcx>>( + cx: &Cx, + ty: Ty<'tcx> +) -> ty::PolyFnSig<'tcx> { + match ty.sty { + ty::FnDef(..) | + // Shims currently have type FnPtr. Not sure this should remain. 
+ ty::FnPtr(_) => ty.fn_sig(*cx.tcx()), + ty::Closure(def_id, substs) => { + let tcx = *cx.tcx(); + let sig = substs.closure_sig(def_id, tcx); + + let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); + sig.map_bound(|sig| tcx.mk_fn_sig( + iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), + sig.output(), + sig.variadic, + sig.unsafety, + sig.abi + )) + } + ty::Generator(def_id, substs, _) => { + let tcx = *cx.tcx(); + let sig = substs.poly_sig(def_id, tcx); + + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + + sig.map_bound(|sig| { + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.intern_substs(&[ + sig.yield_ty.into(), + sig.return_ty.into(), + ]); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + tcx.mk_fn_sig(iter::once(env_ty), + ret_ty, + false, + hir::Unsafety::Normal, + Abi::Rust + ) + }) + } + _ => bug!("unexpected type {:?} to ty_fn_sig", ty) + } +} + +pub fn langcall(tcx: TyCtxt, + span: Option, + msg: &str, + li: LangItem) + -> DefId { + match tcx.lang_items().require(li) { + Ok(id) => id, + Err(s) => { + let msg = format!("{} {}", msg, s); + match span { + Some(span) => tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + } + } +} diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs new file mode 100644 index 0000000000000..2891b1c22fe3b --- /dev/null +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -0,0 +1,92 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use syntax_pos::{BytePos, Span}; +use rustc::hir::def_id::CrateNum; +use std::cell::Cell; + +pub enum FunctionDebugContext { + RegularContext(FunctionDebugContextData), + DebugInfoDisabled, + FunctionWithoutDebugInfo, +} + +impl FunctionDebugContext { + pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData { + match *self { + FunctionDebugContext::RegularContext(ref data) => data, + FunctionDebugContext::DebugInfoDisabled => { + span_bug!(span, "{}", FunctionDebugContext::::debuginfo_disabled_message()); + } + FunctionDebugContext::FunctionWithoutDebugInfo => { + span_bug!(span, "{}", FunctionDebugContext::::should_be_ignored_message()); + } + } + } + + fn debuginfo_disabled_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" + } + + fn should_be_ignored_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext for function that should be \ + ignored by debug info!" + } +} + +/// Enables emitting source locations for the given functions. +/// +/// Since we don't want source locations to be emitted for the function prelude, +/// they are disabled when beginning to codegen a new function. This functions +/// switches source location emitting on and must therefore be called before the +/// first real statement/expression of the function is codegened. 
+pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { + match *dbg_context { + FunctionDebugContext::RegularContext(ref data) => { + data.source_locations_enabled.set(true) + }, + _ => { /* safe to ignore */ } + } +} + +pub struct FunctionDebugContextData { + fn_metadata: D, + source_locations_enabled: Cell, + pub defining_crate: CrateNum, +} + +pub enum VariableAccess<'a, V> { + // The llptr given is an alloca containing the variable's value + DirectVariable { alloca: V }, + // The llptr given is an alloca containing the start of some pointer chain + // leading to the variable's content. + IndirectVariable { alloca: V, address_operations: &'a [i64] } +} + +pub enum VariableKind { + ArgumentVariable(usize /*index*/), + LocalVariable, +} + + +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: Option, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_none() + } +} diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs new file mode 100644 index 0000000000000..5718d3e50236c --- /dev/null +++ b/src/librustc_codegen_ssa/diagnostics.rs @@ -0,0 +1,37 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +register_long_diagnostics! { + +E0668: r##" +Malformed inline assembly rejected by LLVM. + +LLVM checks the validity of the constraints and the assembly string passed to +it. 
This error implies that LLVM seems something wrong with the inline +assembly call. + +In particular, it can happen if you forgot the closing bracket of a register +constraint (see issue #51430): +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#![feature(asm)] + +fn main() { + let rax: u64; + unsafe { + asm!("" :"={rax"(rax)); + println!("Accumulator is: {}", rax); + } +} +``` +"##, + +} diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_ssa/glue.rs similarity index 99% rename from src/librustc_codegen_llvm/glue.rs rename to src/librustc_codegen_ssa/glue.rs index cf82316c559c7..6574bec8d94fa 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -14,7 +14,7 @@ use std; -use rustc_codegen_ssa::common::IntPredicate; +use common::IntPredicate; use meth; use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::{self, Ty}; diff --git a/src/librustc_codegen_llvm/interfaces/abi.rs b/src/librustc_codegen_ssa/interfaces/abi.rs similarity index 96% rename from src/librustc_codegen_llvm/interfaces/abi.rs rename to src/librustc_codegen_ssa/interfaces/abi.rs index 3f85b68447dc7..2eed2e8c9b509 100644 --- a/src/librustc_codegen_llvm/interfaces/abi.rs +++ b/src/librustc_codegen_ssa/interfaces/abi.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::FnType; +use rustc_target::abi::call::FnType; use rustc::ty::{FnSig, Ty, Instance}; use super::Backend; use super::builder::HasCodegen; diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_ssa/interfaces/asm.rs similarity index 100% rename from src/librustc_codegen_llvm/interfaces/asm.rs rename to src/librustc_codegen_ssa/interfaces/asm.rs diff --git a/src/librustc_codegen_ssa/interfaces/backend.rs b/src/librustc_codegen_ssa/interfaces/backend.rs index 45f1f273ef967..fc0171d6e3e11 100644 --- a/src/librustc_codegen_ssa/interfaces/backend.rs +++ b/src/librustc_codegen_ssa/interfaces/backend.rs @@ -9,7 +9,7 @@ // except according to those terms. use super::CodegenObject; -use ModuleCodegen; +use {ModuleCodegen, CachedModuleCodegen}; use rustc::session::Session; use rustc::middle::cstore::EncodedMetadata; use rustc::middle::allocator::AllocatorKind; @@ -55,6 +55,8 @@ pub trait BackendMethods { tcx: TyCtxt, module: ModuleCodegen ); + fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen); + fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen); fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt); fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session); fn wait_for_signal_to_codegen_item(&self, codegen: &Self::OngoingCodegen); diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs similarity index 99% rename from src/librustc_codegen_llvm/interfaces/builder.rs rename to src/librustc_codegen_ssa/interfaces/builder.rs index bfc22f8e064e2..03dbd37dce59e 100644 --- a/src/librustc_codegen_llvm/interfaces/builder.rs +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -8,12 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, AtomicOrdering, +use common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; use libc::c_char; use rustc::ty::TyCtxt; use rustc::ty::layout::{Align, Size}; -use builder::MemFlags; +use MemFlags; use super::Backend; use super::CodegenMethods; use super::debuginfo::DebugInfoBuilderMethods; diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_ssa/interfaces/consts.rs similarity index 100% rename from src/librustc_codegen_llvm/interfaces/consts.rs rename to src/librustc_codegen_ssa/interfaces/consts.rs diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs similarity index 84% rename from src/librustc_codegen_llvm/interfaces/debuginfo.rs rename to src/librustc_codegen_ssa/interfaces/debuginfo.rs index 4b9a100b2fbe2..0e8411d7f3792 100644 --- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -12,7 +12,7 @@ use rustc::ty::{Ty, FnSig}; use super::Backend; use super::builder::HasCodegen; use rustc::mir; -use monomorphize::Instance; +use rustc_mir::monomorphize::Instance; use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; use rustc_data_structures::indexed_vec::IndexVec; use syntax_pos; @@ -40,12 +40,12 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { sig: FnSig<'tcx>, llfn: Self::Value, mir: &mir::Mir, - ) -> FunctionDebugContext<'ll>; + ) -> FunctionDebugContext; fn create_mir_scopes( &self, mir: &mir::Mir, - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext, ) -> IndexVec>; fn extend_scope_to_file( &self, @@ -54,12 +54,13 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { defining_crate: CrateNum, ) -> Self::DIScope; fn debuginfo_finalize(&self); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: 
u64) -> &[i64]; } pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn declare_local( &self, - dbg_context: &FunctionDebugContext<'ll>, + dbg_context: &FunctionDebugContext<>::DIScope>, variable_name: Name, variable_type: Ty<'tcx>, scope_metadata: >::DIScope, @@ -69,7 +70,7 @@ pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, ); fn set_source_location( &self, - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext<>::DIScope>, scope: Option<>::DIScope>, span: syntax_pos::Span, ); diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_ssa/interfaces/intrinsic.rs similarity index 97% rename from src/librustc_codegen_llvm/interfaces/intrinsic.rs rename to src/librustc_codegen_ssa/interfaces/intrinsic.rs index f7491758c783a..8e4b5d64f2eba 100644 --- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs +++ b/src/librustc_codegen_ssa/interfaces/intrinsic.rs @@ -12,7 +12,7 @@ use super::Backend; use super::builder::HasCodegen; use mir::operand::OperandRef; use rustc::ty::Ty; -use abi::FnType; +use rustc_target::abi::call::FnType; use syntax_pos::Span; pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { diff --git a/src/librustc_codegen_ssa/interfaces/misc.rs b/src/librustc_codegen_ssa/interfaces/misc.rs index e01fb60a3c501..f22933d2641ee 100644 --- a/src/librustc_codegen_ssa/interfaces/misc.rs +++ b/src/librustc_codegen_ssa/interfaces/misc.rs @@ -32,6 +32,7 @@ pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn consume_stats(self) -> RefCell; fn codegen_unit(&self) -> &Arc>; fn statics_to_rauw(&self) -> &RefCell>; + fn env_alloca_allowed(&self) -> bool; fn used_statics(&self) -> &RefCell>; fn set_frame_pointer_elimination(&self, llfn: Self::Value); fn apply_target_cpu_attr(&self, llfn: Self::Value); diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs index 
155730f3fc9cc..2cbe94749a0b1 100644 --- a/src/librustc_codegen_ssa/interfaces/mod.rs +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -13,10 +13,32 @@ mod backend; mod misc; mod statics; mod declare; +mod builder; +mod consts; +mod type_; +mod intrinsic; +mod debuginfo; +mod abi; +mod asm; pub use self::backend::{Backend, BackendMethods}; pub use self::misc::MiscMethods; pub use self::statics::StaticMethods; pub use self::declare::{DeclareMethods, PreDefineMethods}; +pub use self::builder::{BuilderMethods, HasCodegen}; +pub use self::consts::ConstMethods; +pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, + LayoutTypeMethods, ArgTypeMethods}; +pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; +pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; +pub use self::abi::{AbiMethods, AbiBuilderMethods}; +pub use self::asm::{AsmMethods, AsmBuilderMethods}; + pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} + +pub trait CodegenMethods<'ll, 'tcx: 'll> : + Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + + PreDefineMethods<'ll, 'tcx> {} diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_ssa/interfaces/type_.rs similarity index 96% rename from src/librustc_codegen_llvm/interfaces/type_.rs rename to src/librustc_codegen_ssa/interfaces/type_.rs index d31237c6f0999..1b3d729234c4d 100644 --- a/src/librustc_codegen_llvm/interfaces/type_.rs +++ b/src/librustc_codegen_ssa/interfaces/type_.rs @@ -10,7 +10,7 @@ use super::Backend; use super::builder::HasCodegen; -use rustc_codegen_ssa::common::TypeKind; +use common::TypeKind; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use std::cell::RefCell; @@ -108,6 +108,8 @@ pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { fn 
reg_backend_type(&self, ty: &Reg) -> Self::Type; fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool; + fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool; + fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64; fn scalar_pair_element_backend_type<'a>( &self, ty: &TyLayout<'tcx>, diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index 7c062781fc0ad..9c4dfb0825ad5 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -20,28 +20,49 @@ #![feature(box_syntax)] #![feature(custom_attribute)] #![feature(libc)] +#![feature(rustc_diagnostic_macros)] +#![feature(in_band_lifetimes)] +#![feature(slice_sort_by_cached_key)] #![feature(nll)] #![allow(unused_attributes)] #![allow(dead_code)] #![feature(quote)] -#![feature(rustc_diagnostic_macros)] -#![recursion_limit="256"] - -extern crate rustc; +#[macro_use] extern crate bitflags; +#[macro_use] extern crate log; +extern crate rustc_apfloat; +#[macro_use] extern crate rustc; extern crate rustc_target; extern crate rustc_mir; -extern crate syntax; +#[macro_use] extern crate syntax; extern crate syntax_pos; +extern crate rustc_incremental; +extern crate rustc_codegen_utils; extern crate rustc_data_structures; extern crate libc; use std::path::PathBuf; use rustc::dep_graph::WorkProduct; use rustc::session::config::{OutputFilenames, OutputType}; +use rustc::middle::lang_items::LangItem; +use rustc::hir::def_id::CrateNum; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync::Lrc; +use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary}; + +// NB: This module needs to be declared first so diagnostics are +// registered before they are used. 
+mod diagnostics; pub mod common; pub mod interfaces; +pub mod mir; +pub mod debuginfo; +pub mod base; +pub mod callee; +pub mod glue; +pub mod meth; +pub mod mono_item; pub struct ModuleCodegen { /// The name of the module. When the crate may be saved between @@ -111,5 +132,31 @@ pub enum ModuleKind { Allocator, } +bitflags! { + pub struct MemFlags: u8 { + const VOLATILE = 1 << 0; + const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; + } +} + +/// Misc info we load from metadata to persist beyond the tcx +struct CrateInfo { + panic_runtime: Option, + compiler_builtins: Option, + profiler_runtime: Option, + sanitizer_runtime: Option, + is_no_builtins: FxHashSet, + native_libraries: FxHashMap>>, + crate_name: FxHashMap, + used_libraries: Lrc>, + link_args: Lrc>, + used_crate_source: FxHashMap>, + used_crates_static: Vec<(CrateNum, LibSource)>, + used_crates_dynamic: Vec<(CrateNum, LibSource)>, + wasm_imports: FxHashMap, + lang_item_to_crate: FxHashMap, + missing_lang_items: FxHashMap>, +} __build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_ssa/meth.rs similarity index 98% rename from src/librustc_codegen_llvm/meth.rs rename to src/librustc_codegen_ssa/meth.rs index c52082fe2cff3..f4597fa829dd6 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -8,9 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::FnType; +use rustc_target::abi::call::FnType; use callee; -use monomorphize; +use rustc_mir::monomorphize; use interfaces::*; diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs similarity index 98% rename from src/librustc_codegen_llvm/mir/analyze.rs rename to src/librustc_codegen_ssa/mir/analyze.rs index 0fabcf7cfd6b0..850b2c24af851 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -19,7 +19,6 @@ use rustc::mir::visit::{Visitor, PlaceContext}; use rustc::mir::traversal; use rustc::ty::{self, Ty}; use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; -use type_of::LayoutLlvmExt; use super::FunctionCx; use interfaces::*; @@ -37,10 +36,10 @@ pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<' let ty = fx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); let layout = fx.cx.layout_of(ty); - if layout.is_llvm_immediate() { + if fx.cx.is_backend_immediate(&layout) { // These sorts of types are immediates that we can store // in an Value without an alloca. - } else if layout.is_llvm_scalar_pair() { + } else if fx.cx.is_backend_scalar_pair(&layout) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -189,7 +188,7 @@ impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods< if let mir::ProjectionElem::Field(..) = proj.elem { let layout = cx.layout_of(base_ty.to_ty(*cx.tcx())); - if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + if cx.is_backend_immediate(&layout) || cx.is_backend_scalar_pair(&layout) { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, // which would trigger `not_ssa` on locals. 
diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs similarity index 99% rename from src/librustc_codegen_llvm/mir/block.rs rename to src/librustc_codegen_ssa/mir/block.rs index cc50243d398c0..de39b180bc59c 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -13,14 +13,13 @@ use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, TyLayout}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; -use abi::{Abi, FnType, PassMode}; -use rustc_target::abi::call::ArgType; +use rustc_target::abi::call::{ArgType, FnType, PassMode}; +use rustc_target::spec::abi::Abi; use base; -use builder::MemFlags; -use common; -use rustc_codegen_ssa::common::IntPredicate; +use MemFlags; +use common::{self, IntPredicate}; use meth; -use monomorphize; +use rustc_mir::monomorphize; use interfaces::*; diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs similarity index 95% rename from src/librustc_codegen_llvm/mir/constant.rs rename to src/librustc_codegen_ssa/mir/constant.rs index 94a6df294c57a..6c9359716251d 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -8,18 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; +use rustc::mir::interpret::ConstEvalErr; use rustc_mir::const_eval::const_field; -use rustc::hir::def_id::DefId; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::sync::Lrc; -use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue}; +use rustc::mir::interpret::{GlobalId, ConstValue}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, TyLayout, HasTyCtxt}; -use common::CodegenCx; +use rustc::ty::layout::{self, LayoutOf, TyLayout, HasTyCtxt}; use syntax::source_map::Span; -use value::Value; use interfaces::*; use super::FunctionCx; diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs similarity index 97% rename from src/librustc_codegen_llvm/mir/mod.rs rename to src/librustc_codegen_ssa/mir/mod.rs index ac7790b177c26..c6b7c58e3ea0f 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -9,18 +9,17 @@ // except according to those terms. 
use libc::c_uint; -use llvm; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; -use base; -use rustc_codegen_ssa::common::Funclet; +use common::Funclet; use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; -use monomorphize::Instance; -use abi::{FnType, PassMode}; +use rustc_mir::monomorphize::Instance; +use rustc_target::abi::call::{FnType, PassMode}; use interfaces::*; +use base; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -30,8 +29,6 @@ use std::iter; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::IndexVec; -pub use self::constant::codegen_static_initializer; - use self::analyze::CleanupKind; use self::place::PlaceRef; use rustc::mir::traversal; @@ -44,7 +41,7 @@ pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods< mir: &'a mir::Mir<'tcx>, - debug_context: FunctionDebugContext<'ll>, + debug_context: FunctionDebugContext<>::DIScope>, llfn: Cx::Value, @@ -625,7 +622,7 @@ fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. // (cuviper) It seems to be fine without the alloca on LLVM 6 and later. 
- let env_alloca = !env_ref && unsafe { llvm::LLVMRustVersionMajor() < 6 }; + let env_alloca = !env_ref && bx.cx().env_alloca_allowed(); let env_ptr = if env_alloca { let scratch = PlaceRef::alloca(bx, bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)), @@ -639,12 +636,7 @@ fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); - let ops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlusUconst(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; + let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env); // The environment and the capture can each be indirect. diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs similarity index 95% rename from src/librustc_codegen_llvm/mir/operand.rs rename to src/librustc_codegen_ssa/mir/operand.rs index 80599fb2e0566..5094f1b9c2b03 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -15,8 +15,7 @@ use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasTyCtxt}; use rustc_data_structures::sync::Lrc; use base; -use builder::{Builder, MemFlags}; -use value::Value; +use MemFlags; use glue; use interfaces::*; @@ -283,34 +282,34 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { } } -impl OperandValue<&'ll Value> { - - pub fn volatile_store( - self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: PlaceRef<'tcx, &'ll Value> - ) { - self.store_with_flags(bx, dest, MemFlags::VOLATILE); - } - - pub fn unaligned_volatile_store( - self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: PlaceRef<'tcx, &'ll Value> - ) { - self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); - } -} - -impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> 
{ - pub fn nontemporal_store( - self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, - dest: PlaceRef<'tcx, &'ll Value> - ) { - self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); - } -} +// impl OperandValue<&'ll Value> { +// +// pub fn volatile_store( +// self, +// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, +// dest: PlaceRef<'tcx, &'ll Value> +// ) { +// self.store_with_flags(bx, dest, MemFlags::VOLATILE); +// } +// +// pub fn unaligned_volatile_store( +// self, +// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, +// dest: PlaceRef<'tcx, &'ll Value> +// ) { +// self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); +// } +// } +// +// impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> { +// pub fn nontemporal_store( +// self, +// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, +// dest: PlaceRef<'tcx, &'ll Value> +// ) { +// self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); +// } +// } impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { fn store_with_flags>( diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs similarity index 99% rename from src/librustc_codegen_llvm/mir/place.rs rename to src/librustc_codegen_ssa/mir/place.rs index e4b17300b4741..f893796deab6a 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -12,8 +12,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use rustc_codegen_ssa::common::IntPredicate; -use type_of::LayoutLlvmExt; +use common::IntPredicate; use glue; use interfaces::*; @@ -120,7 +119,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx))); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + bx.struct_gep(self.llval, bx.cx().backend_field_index(&self.layout, ix)) }; PlaceRef { // 
HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs similarity index 99% rename from src/librustc_codegen_llvm/mir/rvalue.rs rename to src/librustc_codegen_ssa/mir/rvalue.rs index 95e76fe9cc727..7514cbd903073 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -18,10 +18,8 @@ use std::{u128, i128}; use base; use callee; -use common; -use rustc_codegen_ssa::common::{RealPredicate, IntPredicate}; -use monomorphize; -use type_of::LayoutLlvmExt; +use common::{self, RealPredicate, IntPredicate}; +use rustc_mir::monomorphize; use interfaces::*; @@ -54,7 +52,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if dest.layout.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(&dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); @@ -244,7 +242,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> operand.val } mir::CastKind::Unsize => { - assert!(cast.is_llvm_scalar_pair()); + assert!(bx.cx().is_backend_scalar_pair(&cast)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -270,9 +268,9 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } } } - mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { + mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(&operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if cast.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(&cast) { let data_cast = bx.pointercast(data_ptr, bx.cx().scalar_pair_element_backend_type(&cast, 0, true)); OperandValue::Pair(data_cast, meta) @@ -288,7 +286,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } } mir::CastKind::Misc => { - assert!(cast.is_llvm_immediate()); + assert!(bx.cx().is_backend_immediate(&cast)); let ll_t_out = bx.cx().immediate_backend_type(&cast); if operand.layout.abi.is_uninhabited() { let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs similarity index 100% rename from src/librustc_codegen_llvm/mir/statement.rs rename to src/librustc_codegen_ssa/mir/statement.rs diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs similarity index 58% rename from src/librustc_codegen_llvm/mono_item.rs rename to src/librustc_codegen_ssa/mono_item.rs index 09c1bb419678a..5bdef69757ed6 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -14,20 +14,13 @@ //! item-path. This is used for unit testing the code that generates //! paths etc in all kinds of annoying scenarios. 
-use attributes; use base; -use context::CodegenCx; -use llvm; -use monomorphize::Instance; -use type_of::LayoutLlvmExt; use rustc::hir; use rustc::hir::def::Def; -use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; -use rustc::ty::{TypeFoldable, Ty}; +use rustc::ty::Ty; use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use std::fmt; -use value::Value; use interfaces::*; pub use rustc::mir::mono::MonoItem; @@ -126,72 +119,3 @@ pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll> : fmt::Debug + BaseMonoItemExt<'ll } } } - -impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx> - for MonoItem<'tcx> {} - -impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - fn predefine_static(&self, - def_id: DefId, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - let instance = Instance::mono(self.tcx, def_id); - let ty = instance.ty(self.tcx); - let llty = self.layout_of(ty).llvm_type(self); - - let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { - self.sess().span_fatal(self.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); - } - - self.instances.borrow_mut().insert(instance, g); - } - - fn predefine_fn(&self, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_ty = instance.ty(self.tcx); - let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = self.declare_fn(symbol_name, mono_ty); - unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; - base::set_link_section(lldecl, &attrs); - if linkage == Linkage::LinkOnceODR || - linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(self.llmod, lldecl); - } - - // If 
we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. - if linkage != Linkage::Internal && linkage != Linkage::Private && - self.tcx.is_compiler_builtins(LOCAL_CRATE) { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); - } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); - } - } - - debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); - if instance.def.is_inline(self.tcx) { - attributes::inline(self, lldecl, attributes::InlineAttr::Hint); - } - attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); - - self.instances.borrow_mut().insert(instance, lldecl); - } -} diff --git a/src/librustc_codegen_ssa/type_of.rs b/src/librustc_codegen_ssa/type_of.rs new file mode 100644 index 0000000000000..e69de29bb2d1d From 9fe9347f55e77d72a443cdc2df48942247bbc71c Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 3 Oct 2018 16:56:24 +0200 Subject: [PATCH 69/76] Finished moving backend-agnostic code to rustc_codegen_ssa --- src/librustc_codegen_llvm/abi.rs | 11 ++-- src/librustc_codegen_llvm/asm.rs | 6 +- src/librustc_codegen_llvm/attributes.rs | 2 +- src/librustc_codegen_llvm/back/link.rs | 3 +- src/librustc_codegen_llvm/back/write.rs | 3 +- src/librustc_codegen_llvm/base.rs | 57 ++++--------------- src/librustc_codegen_llvm/builder.rs | 12 ++-- src/librustc_codegen_llvm/callee.rs | 8 +-- src/librustc_codegen_llvm/common.rs | 16 +----- src/librustc_codegen_llvm/consts.rs | 11 ++-- src/librustc_codegen_llvm/context.rs | 19 ++++--- .../debuginfo/create_scope_map.rs | 8 +-- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- .../debuginfo/metadata.rs | 2 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 27 ++++----- .../debuginfo/source_loc.rs | 8 +-- 
.../debuginfo/type_names.rs | 2 +- src/librustc_codegen_llvm/debuginfo/utils.rs | 2 +- src/librustc_codegen_llvm/declare.rs | 4 +- src/librustc_codegen_llvm/interfaces/mod.rs | 15 ----- src/librustc_codegen_llvm/intrinsic.rs | 16 +++--- src/librustc_codegen_llvm/lib.rs | 25 +++----- .../{mono-item.rs => mono_item.rs} | 13 +---- src/librustc_codegen_llvm/type_.rs | 4 +- src/librustc_codegen_llvm/type_of.rs | 2 +- src/librustc_codegen_llvm/value.rs | 2 +- src/librustc_codegen_ssa/base.rs | 2 +- src/librustc_codegen_ssa/debuginfo.rs | 4 +- .../interfaces/debuginfo.rs | 2 +- src/librustc_codegen_ssa/lib.rs | 32 +++++------ src/librustc_codegen_ssa/mir/operand.rs | 53 ++++++++--------- src/librustc_codegen_ssa/mono_item.rs | 3 + 32 files changed, 150 insertions(+), 226 deletions(-) delete mode 100644 src/librustc_codegen_llvm/interfaces/mod.rs rename src/librustc_codegen_llvm/{mono-item.rs => mono_item.rs} (93%) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 253979cd6be41..156aef3a100df 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -9,17 +9,18 @@ // except according to those terms. 
use llvm::{self, AttributePlace}; -use builder::{Builder, MemFlags}; -use common::ty_fn_sig; +use rustc_codegen_ssa::MemFlags; +use builder::Builder; +use rustc_codegen_ssa::common::ty_fn_sig; use context::CodegenCx; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; use rustc_target::abi::call::ArgType; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use rustc_target::abi::{LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, Instance}; diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 90a4363602fad..ed5548bcefbc2 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -15,10 +15,10 @@ use builder::Builder; use value::Value; use rustc::hir; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use std::ffi::CString; use libc::{c_uint, c_char}; diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index c8d789e821ad1..c9a1436f97fe8 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -20,7 +20,7 @@ use rustc::ty::query::Providers; use rustc_data_structures::sync::Lrc; use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use attributes; use llvm::{self, Attribute}; diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 86c6a5e65b0e9..2e84d223a8b94 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -24,7 +24,8 @@ use rustc::session::search_paths::PathKind; use 
rustc::session::Session; use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use {CodegenResults, CrateInfo}; +use rustc_codegen_ssa::CrateInfo; +use CodegenResults; use rustc::util::common::time; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 561950c5e5b46..c84f79549a488 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -30,8 +30,7 @@ use time_graph::{self, TimeGraph, Timeline}; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; use {CodegenResults, ModuleLlvm}; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule}; -use CrateInfo; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo}; use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc::ty::TyCtxt; use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index a19fbbc506d7d..d908e73d14e04 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,69 +24,34 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
use super::ModuleLlvm; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use super::LlvmCodegenBackend; -use abi; use back::write; use llvm; use metadata; -use rustc::dep_graph::cgu_reuse_tracker::CguReuse; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::weak_lang_items; -use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; +use rustc::mir::mono::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata}; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; -use rustc::ty::query::Providers; -use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::ty::TyCtxt; use rustc::middle::exported_symbols; -use rustc::util::common::{time, print_time_passes_entry}; -use rustc::util::profiling::ProfileCategory; -use rustc::session::config::{self, DebugInfo, EntryFnType, Lto}; -use rustc::session::Session; -use rustc_incremental; -use mir::place::PlaceRef; -use builder::{Builder, MemFlags}; -use callee; -use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; -use rustc_mir::monomorphize::item::DefPathBasedNames; +use rustc::session::config::{self, DebugInfo}; +use builder::Builder; use common; -use rustc_codegen_ssa::common::{RealPredicate, TypeKind, IntPredicate}; -use meth; -use mir; use context::CodegenCx; -use monomorphize::Instance; -use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; -use rustc_codegen_utils::symbol_names_test; -use time_graph; -use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; - -use rustc::util::nodemap::{FxHashMap, DefIdSet}; -use CrateInfo; +use monomorphize::partitioning::CodegenUnitExt; +use rustc_codegen_ssa::mono_item::MonoItemExt; use 
rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::sync::Lrc; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; -use std::any::Any; use std::ffi::CString; -use std::sync::Arc; -use std::time::{Instant, Duration}; -use std::cmp; -use std::sync::mpsc; -use syntax_pos::Span; +use std::time::Instant; use syntax_pos::symbol::InternedString; -use syntax::attr; -use rustc::hir::{self, CodegenFnAttrs}; +use rustc::hir::CodegenFnAttrs; use value::Value; -use mir::operand::OperandValue; - -use rustc_codegen_utils::check_for_rustc_errors_attr; -use std::marker::PhantomData; pub(crate) fn write_metadata<'a, 'gcx>( diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 24564e7628fce..9e2429f5fa1c8 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -11,7 +11,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, OperandBundleDef, BasicBlock}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; -use rustc_codegen_ssa; +use rustc_codegen_ssa::{self, MemFlags}; use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; @@ -21,11 +21,11 @@ use rustc::ty::TyCtxt; use rustc::ty::layout::{self, Align, Size}; use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use syntax; -use base; -use mir::operand::{OperandValue, OperandRef}; -use mir::place::PlaceRef; +use rustc_codegen_ssa::base::to_immediate; +use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; +use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; use std::ops::Range; use std::ptr; @@ -573,7 +573,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } load }); - OperandValue::Immediate(base::to_immediate(self, llval, ptr.layout)) + OperandValue::Immediate(to_immediate(self, llval, ptr.layout)) } else if let layout::Abi::ScalarPair(ref a, 
ref b) = ptr.layout.abi { let load = |i, scalar: &layout::Scalar| { let llptr = self.struct_gep(ptr.llval, i as u64); diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index f02544a46413a..74a532b50fc57 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -15,17 +15,15 @@ //! closure. use attributes; -use common; +use rustc_codegen_ssa::common; use llvm; use monomorphize::Instance; use context::CodegenCx; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, TypeFoldable}; +use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; -use rustc::ty::subst::Substs; /// Codegens a reference to a fn/method item, monomorphizing and /// inlining as it goes. diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index ae2f6f2a184f6..6f84e96e8e338 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -13,32 +13,22 @@ //! Code that is useful in various codegen modules. 
use llvm::{self, True, False, Bool, BasicBlock}; -use rustc::hir::def_id::DefId; -use rustc::middle::lang_items::LangItem; use abi; -use base; use consts; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; -use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; use rustc::mir::interpret::{Scalar, AllocType, Allocation}; -use rustc::hir; -use interfaces::BuilderMethods; -use mir::constant::const_alloc_to_llvm; -use mir::place::PlaceRef; -use rustc_codegen_ssa::common::TypeKind; +use consts::const_alloc_to_llvm; +use rustc_codegen_ssa::mir::place::PlaceRef; use libc::{c_uint, c_char}; -use std::iter; -use rustc_target::spec::abi::Abi; use syntax::symbol::LocalInternedString; use syntax::ast::Mutability; -use syntax_pos::Span; pub use context::CodegenCx; diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 18d78591e8122..588f4469ac4f8 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -11,21 +11,24 @@ use libc::c_uint; use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; +use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, Pointer, ConstEvalErr, GlobalId}; use rustc::hir::Node; use debuginfo; -use base; use monomorphize::MonoItem; use common::CodegenCx; use monomorphize::Instance; use syntax_pos::Span; +use rustc_target::abi::HasDataLayout; use syntax_pos::symbol::LocalInternedString; +use base; use type_::Type; use type_of::LayoutLlvmExt; +use rustc_data_structures::sync::Lrc; use value::Value; use rustc::ty::{self, Ty}; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; -use rustc::ty::layout::{Align, LayoutOf}; +use rustc::ty::layout::{self, Size, Align, LayoutOf}; use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; @@ -363,7 +366,7 @@ impl StaticMethods<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { unsafe { let 
attrs = &self.tcx.codegen_fn_attrs(def_id); - let (v, alloc) = match ::mir::codegen_static_initializer(&self, def_id) { + let (v, alloc) = match codegen_static_initializer(&self, def_id) { Ok(v) => v, // Error has already been reported Err(_) => return, diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 42b65a4c95688..f38307eeb9357 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -13,15 +13,13 @@ use llvm; use rustc::dep_graph::DepGraphSafe; use rustc::hir; use debuginfo; -use callee; -use base; use monomorphize::Instance; use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use libc::c_uint; use rustc_data_structures::base_n; @@ -33,6 +31,9 @@ use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_target::spec::{HasTargetSpec, Target}; +use rustc_codegen_ssa::callee::resolve_and_get_fn; +use rustc_codegen_ssa::base::wants_msvc_seh; +use callee::get_fn; use std::ffi::CStr; use std::cell::{Cell, RefCell}; @@ -327,7 +328,7 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value { - callee::get_fn(&&self,instance) + get_fn(&&self,instance) } fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { @@ -360,11 +361,11 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } let tcx = self.tcx; let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.sess()) => { - callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) + Some(def_id) if !wants_msvc_seh(self.sess()) => { + resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) } _ => { - let name = if base::wants_msvc_seh(self.sess()) { + let name = if wants_msvc_seh(self.sess()) { 
"__CxxFrameHandler3" } else { "rust_eh_personality" @@ -390,7 +391,7 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { let tcx = self.tcx; assert!(self.sess().target.target.options.custom_unwind_resume); if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { - let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); + let llfn = resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); unwresume.set(Some(llfn)); return llfn; } @@ -446,7 +447,7 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { attributes::apply_target_cpu_attr(self, llfn) } - fn env_alloca_allowed(&self) { + fn env_alloca_allowed(&self) -> bool { unsafe { llvm::LLVMRustVersionMajor() < 6 } } diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index d7180daeced74..31fce4d2655b2 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -8,12 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::{FunctionDebugContext, FunctionDebugContextData}; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, FunctionDebugContextData, MirDebugScope}; use super::metadata::file_metadata; use super::utils::{DIB, span_start}; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use common::CodegenCx; use rustc::mir::{Mir, SourceScope}; use value::Value; @@ -32,7 +32,7 @@ use syntax_pos::BytePos; pub fn create_mir_scopes( cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { let null_scope = MirDebugScope { scope_metadata: None, @@ -68,7 +68,7 @@ pub fn create_mir_scopes( fn make_mir_scope(cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, has_variables: &BitSet, - debug_context: &FunctionDebugContextData<'ll>, + debug_context: &FunctionDebugContextData<&'ll DISubprogram>, scope: SourceScope, scopes: &mut IndexVec>) { if scopes[scope].is_valid() { diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index a9ec5e571702a..d1587ec19963e 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -16,7 +16,7 @@ use common::CodegenCx; use builder::Builder; use rustc::session::config::DebugInfo; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use syntax::attr; diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index c143a1914da63..cd728f3801480 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -17,7 +17,7 @@ use super::utils::{debug_context, DIB, span_start, use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use abi; use 
value::Value; diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 2c97de1b95f58..345a743a7513d 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -11,8 +11,8 @@ // See doc.rs for documentation. mod doc; -use self::VariableAccess::*; -use self::VariableKind::*; +use rustc_codegen_ssa::debuginfo::VariableAccess::*; +use rustc_codegen_ssa::debuginfo::VariableKind::*; use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; use self::namespace::mangled_name_of_instance; @@ -21,8 +21,8 @@ use self::metadata::{type_metadata, file_metadata, TypeMap}; use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, - DILexicalBlock}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DIArray, DIFlags, + DILexicalBlock, DISubprogram}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::{DefId, CrateNum}; use rustc::ty::subst::{Substs, UnpackedKind}; @@ -38,6 +38,8 @@ use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::indexed_vec::IndexVec; use value::Value; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, + VariableKind, FunctionDebugContextData}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -47,7 +49,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; use rustc::ty::layout::{self, LayoutOf}; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; pub mod gdb; mod utils; @@ -57,8 +59,7 @@ pub mod metadata; mod create_scope_map; mod source_loc; -pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; -pub use self::source_loc::start_emitting_source_locations; +pub use self::create_scope_map::{create_mir_scopes}; pub use 
self::metadata::create_global_var_metadata; pub use self::metadata::extend_scope_to_file; pub use self::source_loc::set_source_location; @@ -162,7 +163,7 @@ impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> { fn declare_local( &self, - dbg_context: &FunctionDebugContext<'ll>, + dbg_context: &FunctionDebugContext<&'ll DISubprogram>, variable_name: ast::Name, variable_type: Ty<'tcx>, scope_metadata: &'ll DIScope, @@ -228,7 +229,7 @@ impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> fn set_source_location( &self, - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -249,7 +250,7 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V sig: ty::FnSig<'tcx>, llfn: &'ll Value, mir: &mir::Mir, - ) -> FunctionDebugContext<'ll> { + ) -> FunctionDebugContext<&'ll DISubprogram> { if self.sess().opts.debuginfo == DebugInfo::None { return FunctionDebugContext::DebugInfoDisabled; } @@ -532,7 +533,7 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V fn create_mir_scopes( &self, mir: &mir::Mir, - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { create_scope_map::create_mir_scopes(&self, mir, debug_context) } @@ -550,12 +551,12 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V finalize(self) } - fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> &[i64] { + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] { unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), llvm::LLVMRustDIBuilderCreateOpPlusUconst(), byte_offset_of_var_in_env as i64, llvm::LLVMRustDIBuilderCreateOpDeref()] - }; + } } } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 
17c93288d504b..166636b901ca4 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -12,12 +12,12 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; use super::metadata::UNKNOWN_COLUMN_NUMBER; -use super::FunctionDebugContext; +use rustc_codegen_ssa::debuginfo::FunctionDebugContext; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use builder::Builder; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use libc::c_uint; use syntax_pos::{Span, Pos}; @@ -27,7 +27,7 @@ use value::Value; /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). pub fn set_source_location( - debug_context: &FunctionDebugContext<'ll>, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, bx: &Builder<'_, 'll, '_, &'ll Value>, scope: Option<&'ll DIScope>, span: Span, diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index ab23b71b63b44..92ca4aac9f9c4 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -15,7 +15,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use rustc::hir; diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index e99ec0d1b577c..e69ab0f102cae 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -20,7 +20,7 @@ use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use syntax_pos::{self, Span}; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index ab87490b74f5c..25c691b1c5e40 100644 --- 
a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -30,9 +30,9 @@ use rustc_target::spec::PanicStrategy; use abi::{Abi, FnType, FnTypeExt}; use attributes; use context::CodegenCx; -use common; +use rustc_codegen_ssa::common; use type_::Type; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use value::Value; /// Declare a function. diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs deleted file mode 100644 index e41ab4576073c..0000000000000 --- a/src/librustc_codegen_llvm/interfaces/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use rustc_codegen_ssa::interfaces::{Backend, BackendMethods, CodegenObject, MiscMethods, - StaticMethods, DeclareMethods, PreDefineMethods, BuilderMethods, HasCodegen, ConstMethods, - TypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, ArgTypeMethods, - IntrinsicCallMethods, IntrinsicDeclarationMethods, DebugInfoMethods, DebugInfoBuilderMethods, - AbiMethods, AbiBuilderMethods, AsmMethods, AsmBuilderMethods, CodegenMethods}; diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 7d8b3c8d61741..4b873b3f4729f 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -14,11 +14,11 @@ use attributes; use intrinsics::{self, Intrinsic}; use llvm; use abi::{Abi, FnType, LlvmType, PassMode}; -use mir::place::PlaceRef; -use mir::operand::{OperandRef, OperandValue}; -use base::*; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use 
rustc_codegen_ssa::glue; +use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types}; use context::CodegenCx; -use glue; use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; @@ -30,7 +30,7 @@ use syntax::symbol::Symbol; use builder::Builder; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use rustc::session::Session; use syntax_pos::Span; @@ -259,12 +259,12 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> }, "volatile_store" => { let dst = args[0].deref(cx); - args[1].val.volatile_store(&self, dst); + args[1].val.volatile_store(self, dst); return; }, "unaligned_volatile_store" => { let dst = args[0].deref(cx); - args[1].val.unaligned_volatile_store(&self, dst); + args[1].val.unaligned_volatile_store(self, dst); return; }, "prefetch_read_data" | "prefetch_write_data" | @@ -559,7 +559,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> "nontemporal_store" => { let dst = args[0].deref(cx); - args[1].val.nontemporal_store(&self, dst); + args[1].val.nontemporal_store(self, dst); return; } diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index d3c7f18fe598f..6ec187809ebfe 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -40,6 +40,7 @@ use back::write::create_target_machine; use syntax_pos::symbol::Symbol; extern crate flate2; +#[macro_use] extern crate bitflags; extern crate libc; #[macro_use] extern crate rustc; extern crate jobserver; @@ -66,7 +67,7 @@ extern crate cc; // Used to locate MSVC extern crate tempfile; extern crate memmap; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use time_graph::TimeGraph; use std::sync::mpsc::Receiver; use back::write::{self, OngoingCodegen}; @@ -76,22 +77,17 @@ use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; use std::sync::mpsc; -use rustc_data_structures::sync::Lrc; use 
rustc::dep_graph::DepGraph; -use rustc::hir::def_id::CrateNum; use rustc::middle::allocator::AllocatorKind; use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; -use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; -use rustc::middle::lang_items::LangItem; use rustc::session::{Session, CompileIncomplete}; use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; use rustc::ty::{self, TyCtxt}; use rustc::util::time_graph; -use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; -use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule, CachedModuleCodegen, CrateInfo}; use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc_data_structures::svh::Svh; @@ -111,31 +107,26 @@ mod back { pub mod wasm; } -mod interfaces; - mod abi; mod allocator; mod asm; mod attributes; mod base; -mod builder; mod callee; +mod builder; mod common; mod consts; mod context; mod debuginfo; mod declare; -mod glue; mod intrinsic; +mod mono_item; // The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. 
#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; } mod llvm_util; mod metadata; -mod meth; -mod mir; -mod mono_item; mod type_; mod type_of; mod value; @@ -272,13 +263,13 @@ impl CodegenBackend for LlvmCodegenBackend { fn provide(&self, providers: &mut ty::query::Providers) { back::symbol_names::provide(providers); back::symbol_export::provide(providers); - base::provide(providers); + rustc_codegen_ssa::base::provide(providers); attributes::provide(providers); } fn provide_extern(&self, providers: &mut ty::query::Providers) { back::symbol_export::provide_extern(providers); - base::provide_extern(providers); + rustc_codegen_ssa::base::provide_extern(providers); attributes::provide_extern(providers); } @@ -287,7 +278,7 @@ impl CodegenBackend for LlvmCodegenBackend { tcx: TyCtxt<'a, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { - box base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) + box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) } fn join_codegen_and_link( diff --git a/src/librustc_codegen_llvm/mono-item.rs b/src/librustc_codegen_llvm/mono_item.rs similarity index 93% rename from src/librustc_codegen_llvm/mono-item.rs rename to src/librustc_codegen_llvm/mono_item.rs index f103e32fe80a3..be5fa7f73ee29 100644 --- a/src/librustc_codegen_llvm/mono-item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -14,24 +14,17 @@ use context::CodegenCx; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; -use rustc::hir; -use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; -use rustc::ty::{TypeFoldable, Ty}; -use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; -use std::fmt; +use rustc::ty::TypeFoldable; +use rustc::ty::layout::LayoutOf; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; pub use rustc::mir::mono::MonoItem; pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; - -impl<'a, 'll:'a, 'tcx: 
'll> MonoItemExt<'a, 'll, 'tcx> - for MonoItem<'tcx> {} - impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn predefine_static(&self, def_id: DefId, diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index e620a09b62bd6..8582ab0077439 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -16,7 +16,7 @@ use llvm; use llvm::{Bool, False, True}; use context::CodegenCx; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use syntax::ast; @@ -414,7 +414,7 @@ impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { ty.is_llvm_scalar_pair() } fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64 { - ty.llvm_field_index() + ty.llvm_field_index(index) } fn scalar_pair_element_backend_type<'a>( &self, diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index bda3dd793ad63..cdefd543ea67a 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -17,7 +17,7 @@ use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; use value::Value; -use interfaces::*; +use rustc_codegen_ssa::interfaces::*; use std::fmt::Write; diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index 5ad0fecc326ba..6b48a6269488c 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -12,7 +12,7 @@ pub use llvm::Value; use llvm; -use interfaces::CodegenObject; +use rustc_codegen_ssa::interfaces::CodegenObject; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 48ef2d6ff8829..05cc864c33ccc 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -473,7 +473,7 @@ pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx /// 
Create the `main` function which will initialize the rust runtime and call /// users main function. -fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( +pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( cx: &'a Bx::CodegenCx ) { let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs index 2891b1c22fe3b..0fc61422bb3a2 100644 --- a/src/librustc_codegen_ssa/debuginfo.rs +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -57,8 +57,8 @@ pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) } pub struct FunctionDebugContextData { - fn_metadata: D, - source_locations_enabled: Cell, + pub fn_metadata: D, + pub source_locations_enabled: Cell, pub defining_crate: CrateNum, } diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs index 0e8411d7f3792..4e536c6c84946 100644 --- a/src/librustc_codegen_ssa/interfaces/debuginfo.rs +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -54,7 +54,7 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { defining_crate: CrateNum, ) -> Self::DIScope; fn debuginfo_finalize(&self); - fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> &[i64]; + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4]; } pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index 9c4dfb0825ad5..818c96f9e91b6 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -141,22 +141,22 @@ bitflags! 
{ } /// Misc info we load from metadata to persist beyond the tcx -struct CrateInfo { - panic_runtime: Option, - compiler_builtins: Option, - profiler_runtime: Option, - sanitizer_runtime: Option, - is_no_builtins: FxHashSet, - native_libraries: FxHashMap>>, - crate_name: FxHashMap, - used_libraries: Lrc>, - link_args: Lrc>, - used_crate_source: FxHashMap>, - used_crates_static: Vec<(CrateNum, LibSource)>, - used_crates_dynamic: Vec<(CrateNum, LibSource)>, - wasm_imports: FxHashMap, - lang_item_to_crate: FxHashMap, - missing_lang_items: FxHashMap>, +pub struct CrateInfo { + pub panic_runtime: Option, + pub compiler_builtins: Option, + pub profiler_runtime: Option, + pub sanitizer_runtime: Option, + pub is_no_builtins: FxHashSet, + pub native_libraries: FxHashMap>>, + pub crate_name: FxHashMap, + pub used_libraries: Lrc>, + pub link_args: Lrc>, + pub used_crate_source: FxHashMap>, + pub used_crates_static: Vec<(CrateNum, LibSource)>, + pub used_crates_dynamic: Vec<(CrateNum, LibSource)>, + pub wasm_imports: FxHashMap, + pub lang_item_to_crate: FxHashMap, + pub missing_lang_items: FxHashMap>, } __build_diagnostic_array! 
{ librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 5094f1b9c2b03..fc10330082eac 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -280,38 +280,31 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::empty()); } -} -// impl OperandValue<&'ll Value> { -// -// pub fn volatile_store( -// self, -// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, -// dest: PlaceRef<'tcx, &'ll Value> -// ) { -// self.store_with_flags(bx, dest, MemFlags::VOLATILE); -// } -// -// pub fn unaligned_volatile_store( -// self, -// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, -// dest: PlaceRef<'tcx, &'ll Value> -// ) { -// self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); -// } -// } -// -// impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> { -// pub fn nontemporal_store( -// self, -// bx: &Builder<'a, 'll, 'tcx, &'ll Value>, -// dest: PlaceRef<'tcx, &'ll Value> -// ) { -// self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); -// } -// } + pub fn volatile_store>( + self, + bx: &Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V> { + self.store_with_flags(bx, dest, MemFlags::VOLATILE); + } + + pub fn unaligned_volatile_store>( + self, + bx: &Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V> { + self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); + } + + pub fn nontemporal_store>( + self, + bx: &Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V> { + self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); + } -impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { fn store_with_flags>( self, bx: &Bx, diff --git a/src/librustc_codegen_ssa/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs 
index 5bdef69757ed6..03f0da08c4bf7 100644 --- a/src/librustc_codegen_ssa/mono_item.rs +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -119,3 +119,6 @@ pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll> : fmt::Debug + BaseMonoItemExt<'ll } } } + +impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx> + for MonoItem<'tcx> {} From 58c8f32e04cbde1e9098d34df5810ee705ffac6a Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 4 Oct 2018 10:41:22 +0200 Subject: [PATCH 70/76] Fix lines too long --- src/librustc_codegen_llvm/consts.rs | 3 ++- src/librustc_codegen_ssa/base.rs | 3 ++- src/librustc_codegen_ssa/interfaces/debuginfo.rs | 8 ++++++-- src/librustc_codegen_ssa/type_of.rs | 0 4 files changed, 10 insertions(+), 4 deletions(-) delete mode 100644 src/librustc_codegen_ssa/type_of.rs diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 588f4469ac4f8..43b9911e14338 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -11,7 +11,8 @@ use libc::c_uint; use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; -use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, Pointer, ConstEvalErr, GlobalId}; +use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, + Pointer, ConstEvalErr, GlobalId}; use rustc::hir::Node; use debuginfo; use monomorphize::MonoItem; diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 05cc864c33ccc..5e9ec01de772d 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -48,7 +48,8 @@ use meth; use mir; use rustc::util::time_graph; use rustc_mir::monomorphize::Instance; -use rustc_mir::monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; +use rustc_mir::monomorphize::partitioning::{self, PartitioningStrategy, + CodegenUnit, CodegenUnitExt}; use mono_item::{MonoItem, BaseMonoItemExt}; use rustc::util::nodemap::{FxHashMap, FxHashSet, 
DefIdSet}; use rustc_data_structures::sync::Lrc; diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs index 4e536c6c84946..6e01903fd7ab0 100644 --- a/src/librustc_codegen_ssa/interfaces/debuginfo.rs +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -60,7 +60,9 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn declare_local( &self, - dbg_context: &FunctionDebugContext<>::DIScope>, + dbg_context: &FunctionDebugContext< + >::DIScope + >, variable_name: Name, variable_type: Ty<'tcx>, scope_metadata: >::DIScope, @@ -70,7 +72,9 @@ pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, ); fn set_source_location( &self, - debug_context: &FunctionDebugContext<>::DIScope>, + debug_context: &FunctionDebugContext< + >::DIScope + >, scope: Option<>::DIScope>, span: syntax_pos::Span, ); diff --git a/src/librustc_codegen_ssa/type_of.rs b/src/librustc_codegen_ssa/type_of.rs deleted file mode 100644 index e69de29bb2d1d..0000000000000 From 2a6412b53f812e925d776dbc9d20bc3d33fe7970 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Thu, 4 Oct 2018 15:23:10 +0200 Subject: [PATCH 71/76] Added some docs + start to &mut self builder methods --- src/librustc_codegen_llvm/builder.rs | 18 ++++++------ src/librustc_codegen_llvm/intrinsic.rs | 8 +++--- src/librustc_codegen_llvm/lib.rs | 2 +- src/librustc_codegen_ssa/base.rs | 4 +-- .../interfaces/backend.rs | 3 +- .../interfaces/builder.rs | 14 +++++----- src/librustc_codegen_ssa/interfaces/mod.rs | 18 +++++++++++- src/librustc_codegen_ssa/lib.rs | 8 +++--- src/librustc_codegen_ssa/mir/block.rs | 28 +++++++++---------- src/librustc_codegen_ssa/mir/mod.rs | 14 +++++----- src/librustc_codegen_ssa/mir/rvalue.rs | 6 ++-- 11 files changed, 70 insertions(+), 53 deletions(-) diff --git a/src/librustc_codegen_llvm/builder.rs 
b/src/librustc_codegen_llvm/builder.rs index 9e2429f5fa1c8..a632749e26984 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -64,7 +64,7 @@ impl BuilderMethods<'a, 'll, 'tcx> llfn: &'ll Value, name: &'b str ) -> Self { - let bx = Builder::with_cx(cx); + let mut bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); llvm::LLVMAppendBasicBlockInContext( @@ -121,40 +121,40 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&mut self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - fn ret_void(&self) { + fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } } - fn ret(&self, v: &'ll Value) { + fn ret(&mut self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - fn br(&self, dest: &'ll BasicBlock) { + fn br(&mut self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); @@ -162,7 +162,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn cond_br( - &self, + &mut self, cond: &'ll Value, then_llbb: &'ll BasicBlock, else_llbb: &'ll BasicBlock, @@ -446,7 +446,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let bx = Builder::with_cx(self.cx); + let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); diff --git 
a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 4b873b3f4729f..c0035740792a0 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -803,10 +803,10 @@ fn codegen_msvc_try( bx.set_personality_fn(bx.cx().eh_personality()); - let normal = bx.build_sibling_block("normal"); + let mut normal = bx.build_sibling_block("normal"); let catchswitch = bx.build_sibling_block("catchswitch"); let catchpad = bx.build_sibling_block("catchpad"); - let caught = bx.build_sibling_block("caught"); + let mut caught = bx.build_sibling_block("caught"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); @@ -927,8 +927,8 @@ fn codegen_gnu_try( // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bx.build_sibling_block("then"); - let catch = bx.build_sibling_block("catch"); + let mut then = bx.build_sibling_block("then"); + let mut catch = bx.build_sibling_block("catch"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 6ec187809ebfe..d11af9fa64b84 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -133,7 +133,7 @@ mod value; pub struct LlvmCodegenBackend(()); -impl BackendMethods for LlvmCodegenBackend { +impl ExtraBackendMethods for LlvmCodegenBackend { type Metadata = ModuleLlvm; type OngoingCodegen = OngoingCodegen; diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 5e9ec01de772d..db0062d14c953 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -535,7 +535,7 @@ pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, cx.set_frame_pointer_elimination(llfn); cx.apply_target_cpu_attr(llfn); - let bx = Bx::new_block(&cx, llfn, "top"); + let mut bx = 
Bx::new_block(&cx, llfn, "top"); bx.insert_reference_to_gdb_debug_scripts_section_global(); @@ -571,7 +571,7 @@ pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -pub fn codegen_crate( +pub fn codegen_crate( backend: B, tcx: TyCtxt<'ll, 'tcx, 'tcx>, rx: mpsc::Receiver> diff --git a/src/librustc_codegen_ssa/interfaces/backend.rs b/src/librustc_codegen_ssa/interfaces/backend.rs index fc0171d6e3e11..fbdccfca39da8 100644 --- a/src/librustc_codegen_ssa/interfaces/backend.rs +++ b/src/librustc_codegen_ssa/interfaces/backend.rs @@ -11,6 +11,7 @@ use super::CodegenObject; use {ModuleCodegen, CachedModuleCodegen}; use rustc::session::Session; +use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc::middle::cstore::EncodedMetadata; use rustc::middle::allocator::AllocatorKind; use rustc::ty::TyCtxt; @@ -27,7 +28,7 @@ pub trait Backend<'ll> { type Context; } -pub trait BackendMethods { +pub trait ExtraBackendMethods : CodegenBackend { type Metadata; type OngoingCodegen; diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs index 03dbd37dce59e..4d45844e08a9e 100644 --- a/src/librustc_codegen_ssa/interfaces/builder.rs +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -50,14 +50,14 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + fn llbb(&self) -> >::BasicBlock; fn count_insn(&self, category: &str); - fn set_value_name(&self, value: >::Value, name: &str); - fn position_at_end(&self, llbb: >::BasicBlock); - fn position_at_start(&self, llbb: >::BasicBlock); - fn ret_void(&self); - fn ret(&self, v: >::Value); - fn br(&self, dest: >::BasicBlock); + fn set_value_name(&mut self, value: >::Value, name: &str); + fn position_at_end(&mut self, llbb: >::BasicBlock); + fn position_at_start(&mut self, llbb: >::BasicBlock); + fn ret_void(&mut self); + fn ret(&mut self, v: >::Value); + 
fn br(&mut self, dest: >::BasicBlock); fn cond_br( - &self, + &mut self, cond: >::Value, then_llbb: >::BasicBlock, else_llbb: >::BasicBlock, diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs index 2cbe94749a0b1..61cad6d0b44ba 100644 --- a/src/librustc_codegen_ssa/interfaces/mod.rs +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -8,6 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +//! Interface of a Rust codegen backend +//! +//! This crate defines all the traits that have to be implemented by a codegen backend in order to +//! use the backend-agnostic codegen code in `rustc_codegen_ssa`. +//! +//! The interface is designed around two backend-specific data structures, the codegen context and +//! the builder. The codegen context is supposed to be read-only after its creation and during the +//! actual codegen, while the builder stores the information about the function during codegen and +//! is used to produce the instructions of the backend IR. +//! +//! Finaly, a third `Backend` structure has to implement methods related to how codegen information +//! is passed to the backend, especially for asynchronous compilation. +//! +//! The traits contain associated types that are backend-specific, such as the backend's value or +//! basic blocks. + use std::fmt; mod backend; mod misc; @@ -21,7 +37,7 @@ mod debuginfo; mod abi; mod asm; -pub use self::backend::{Backend, BackendMethods}; +pub use self::backend::{Backend, ExtraBackendMethods}; pub use self::misc::MiscMethods; pub use self::statics::StaticMethods; pub use self::declare::{DeclareMethods, PreDefineMethods}; diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index 818c96f9e91b6..2a42ad91e3d58 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -8,10 +8,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -//! # Note -//! -//! This API is completely unstable and subject to change. - #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] @@ -28,6 +24,10 @@ #![allow(dead_code)] #![feature(quote)] +//! This crate contains codegen code that is used by all codegen backends (LLVM and others). +//! The backend-agnostic functions of this crate use functions defined in various traits that +//! have to be implemented by each backends. + #[macro_use] extern crate bitflags; #[macro_use] extern crate log; extern crate rustc_apfloat; diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index de39b180bc59c..b216379b376d4 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -102,7 +102,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> }; let funclet_br = - |this: &mut Self, bx: &Bx, target: mir::BasicBlock| { + |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| { let (lltarget, is_cleanupret) = lltarget(this, target); if is_cleanupret { // micro-optimization: generate a `ret` rather than a jump @@ -115,7 +115,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> let do_call = | this: &mut Self, - bx: &Bx, + bx: &mut Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, fn_ptr: Cx::Value, llargs: &[Cx::Value], @@ -191,7 +191,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } mir::TerminatorKind::Goto { target } => { - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { @@ -293,7 +293,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { 
// we don't actually need to drop anything. - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); return } @@ -324,7 +324,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> bx.cx().fn_type_of_instance(&drop_fn)) } }; - do_call(self, &bx, fn_ty, drop_fn, args, + do_call(self, &mut bx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); } @@ -348,7 +348,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); return; } @@ -419,7 +419,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. - do_call(self, &bx, fn_ty, llfn, &args, None, cleanup); + do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. } => { @@ -469,7 +469,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; self.codegen_transmute(&bx, &args[0], dest); - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -496,7 +496,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); return; } _ => bx.cx().new_fn_type(sig, &extra_args) @@ -542,7 +542,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // Codegen the actual panic invoke/call. 
do_call( self, - &bx, + &mut bx, fn_ty, llfn, &[msg_file_line_col], @@ -640,7 +640,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } if let Some((_, target)) = *destination { - funclet_br(self, &bx, target); + funclet_br(self, &mut bx, target); } else { bx.unreachable(); } @@ -692,7 +692,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> _ => span_bug!(span, "no llfn for call"), }; - do_call(self, &bx, fn_ty, fn_ptr, &llargs, + do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } @@ -864,7 +864,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> span_bug!(self.mir.span, "landing pad was not inserted?") } - let bx : Bx = self.new_block("cleanup"); + let mut bx : Bx = self.new_block("cleanup"); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); @@ -903,7 +903,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> &self, bb: mir::BasicBlock ) -> Bx { - let bx = Bx::with_cx(self.cx); + let mut bx = Bx::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx } diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index c6b7c58e3ea0f..6912f78ace25d 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -222,7 +222,7 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( debug!("fn_ty: {:?}", fn_ty); let debug_context = cx.create_function_debug_context(instance, sig, llfn, mir); - let bx = Bx::new_block(cx, llfn, "start"); + let mut bx = Bx::new_block(cx, llfn, "start"); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); @@ -243,7 +243,7 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // Compute debuginfo scopes from MIR scopes. 
let scopes = cx.create_mir_scopes(mir, &debug_context); - let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); + let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { instance, @@ -270,7 +270,7 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals); + let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; @@ -371,7 +371,7 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( mir: &'a Mir<'tcx>, - bx: &Bx, + bx: &mut Bx, cleanup_kinds: &IndexVec, block_bxs: &IndexVec>::BasicBlock>) -> (IndexVec>::BasicBlock>>, @@ -408,7 +408,7 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // } Some(&mir::TerminatorKind::Abort) => { let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); - let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); + let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); let cs = cs_bx.catch_switch(None, None, 1); @@ -424,7 +424,7 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( cp_bx.br(llbb); } _ => { - let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); + let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); ret_llbb = cleanup_bx.llbb(); cleanup = cleanup_bx.cleanup_pad(None, &[]); cleanup_bx.br(llbb); @@ -439,7 +439,7 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( /// argument's value. As arguments are places, these are always /// indirect. 
fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, fx: &FunctionCx<'a, 'f, 'll, 'tcx, Bx::CodegenCx>, scopes: &IndexVec< mir::SourceScope, diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 7514cbd903073..9859804cd0340 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -33,7 +33,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { pub fn codegen_rvalue>( &mut self, - bx: Bx, + mut bx: Bx, dest: PlaceRef<'tcx, Cx::Value>, rvalue: &mir::Rvalue<'tcx> ) -> Bx { @@ -124,8 +124,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> let count = bx.cx().const_usize(count); let end = dest.project_index(&bx, count).llval; - let header_bx = bx.build_sibling_block("repeat_loop_header"); - let body_bx = bx.build_sibling_block("repeat_loop_body"); + let mut header_bx = bx.build_sibling_block("repeat_loop_header"); + let mut body_bx = bx.build_sibling_block("repeat_loop_body"); let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); From 376f4fe044631462ed0754c3861038ccc07a528a Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Fri, 5 Oct 2018 15:08:49 +0200 Subject: [PATCH 72/76] All Builder methods now take &mut self instead of &self --- src/librustc_codegen_llvm/abi.rs | 33 +-- src/librustc_codegen_llvm/asm.rs | 2 +- src/librustc_codegen_llvm/builder.rs | 252 +++++++++--------- src/librustc_codegen_llvm/debuginfo/gdb.rs | 2 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 6 +- src/librustc_codegen_llvm/intrinsic.rs | 74 ++--- src/librustc_codegen_ssa/base.rs | 52 ++-- src/librustc_codegen_ssa/common.rs | 11 +- src/librustc_codegen_ssa/glue.rs | 17 +- src/librustc_codegen_ssa/interfaces/abi.rs | 2 +- src/librustc_codegen_ssa/interfaces/asm.rs | 2 +- .../interfaces/builder.rs | 230 ++++++++-------- .../interfaces/debuginfo.rs | 6 +- 
.../interfaces/intrinsic.rs | 2 +- src/librustc_codegen_ssa/interfaces/type_.rs | 4 +- src/librustc_codegen_ssa/meth.rs | 16 +- src/librustc_codegen_ssa/mir/block.rs | 103 +++---- src/librustc_codegen_ssa/mir/mod.rs | 15 +- src/librustc_codegen_ssa/mir/operand.rs | 41 +-- src/librustc_codegen_ssa/mir/place.rs | 36 +-- src/librustc_codegen_ssa/mir/rvalue.rs | 188 ++++++------- src/librustc_codegen_ssa/mir/statement.rs | 22 +- 22 files changed, 570 insertions(+), 546 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 156aef3a100df..60644de100ce8 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -172,13 +172,13 @@ pub trait ArgTypeExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; fn store( &self, - bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value> ); fn store_fn_arg( &self, - bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value> ); } @@ -196,7 +196,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// or results of call/invoke instructions into their destinations. fn store( &self, - bx: &Builder<'_, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value> ) { @@ -240,10 +240,13 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { bx.store(val, llscratch, scratch_align); // ...and then memcpy it to the intended destination. 
+ let llval_cast = bx.pointercast(dst.llval, cx.type_i8p()); + let llscratch_cast = bx.pointercast(llscratch, cx.type_i8p()); + let size = cx.const_usize(self.layout.size.bytes()); bx.call_memcpy( - bx.pointercast(dst.llval, cx.type_i8p()), - bx.pointercast(llscratch, cx.type_i8p()), - cx.const_usize(self.layout.size.bytes()), + llval_cast, + llscratch_cast, + size, self.layout.align.min(scratch_align), MemFlags::empty() ); @@ -257,7 +260,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { fn store_fn_arg( &self, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value> ) { @@ -283,19 +286,19 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { impl<'a, 'll: 'a, 'tcx: 'll> ArgTypeMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn store_fn_arg( - &self, + &mut self, ty: &ArgType<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, >::Value> ) { - ty.store_fn_arg(&self, idx, dst) + ty.store_fn_arg(self, idx, dst) } fn store_arg_ty( - &self, + &mut self, ty: &ArgType<'tcx, Ty<'tcx>>, val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value> ) { - ty.store(&self, val, dst) + ty.store(self, val, dst) } fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { ty.memory_ty(self.cx()) @@ -322,7 +325,7 @@ pub trait FnTypeExt<'tcx> { fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value); + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { @@ -725,7 +728,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value) { + fn apply_attrs_callsite(&self, bx: &mut 
Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); @@ -796,10 +799,10 @@ impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { impl AbiBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn apply_attrs_callsite( - &self, + &mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: >::Value ) { - ty.apply_attrs_callsite(&self, callsite) + ty.apply_attrs_callsite(self, callsite) } } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index ed5548bcefbc2..2d6b9f156e922 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -26,7 +26,7 @@ use libc::{c_uint, c_char}; impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn codegen_inline_asm( - &self, + &mut self, ia: &hir::InlineAsm, outputs: Vec>, mut inputs: Vec<&'ll Value> diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index a632749e26984..34a8ae6b862de 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -174,7 +174,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn switch( - &self, + &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, num_cases: usize, @@ -185,7 +185,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn invoke( - &self, + &mut self, llfn: &'ll Value, args: &[&'ll Value], then: &'ll BasicBlock, @@ -230,7 +230,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn unreachable(&self) { + fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); @@ -238,21 +238,21 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Arithmetic */ - fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - fn 
fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -261,21 +261,21 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -284,21 +284,21 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = 
llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -308,42 +308,42 @@ impl BuilderMethods<'a, 'll, 'tcx> } - fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -352,28 +352,28 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { 
self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -382,70 +382,70 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, 
noname()) } } - fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -453,7 +453,7 @@ impl BuilderMethods<'a, 'll, 'tcx> bx.dynamic_alloca(ty, name, align) } - fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -468,7 +468,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn array_alloca(&self, + fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, name: &str, @@ -487,7 +487,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -496,7 +496,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -506,7 +506,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_load( - &self, + &mut self, 
ptr: &'ll Value, order: rustc_codegen_ssa::common::AtomicOrdering, align: Align @@ -528,7 +528,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn load_ref( - &self, + &mut self, ptr: &PlaceRef<'tcx, &'ll Value> ) -> OperandRef<'tcx, &'ll Value> { debug!("PlaceRef::load: {:?}", ptr); @@ -539,21 +539,25 @@ impl BuilderMethods<'a, 'll, 'tcx> return OperandRef::new_zst(self.cx(), ptr.layout); } - let scalar_load_metadata = |load, scalar: &layout::Scalar| { + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + load: &'ll Value, + scalar: &layout::Scalar + ) { let vr = scalar.valid_range.clone(); match scalar.value { layout::Int(..) => { - let range = scalar.valid_range_exclusive(self.cx()); + let range = scalar.valid_range_exclusive(bx.cx()); if range.start != range.end { - &self.range_metadata(load, range); + bx.range_metadata(load, range); } } layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - &self.nonnull_metadata(load); + bx.nonnull_metadata(load); } _ => {} } - }; + } let val = if let Some(llextra) = ptr.llextra { OperandValue::Ref(ptr.llval, Some(llextra), ptr.align) @@ -567,18 +571,18 @@ impl BuilderMethods<'a, 'll, 'tcx> } } let llval = const_llval.unwrap_or_else(|| { - let load = &self.load(ptr.llval, ptr.align); + let load = self.load(ptr.llval, ptr.align); if let layout::Abi::Scalar(ref scalar) = ptr.layout.abi { - scalar_load_metadata(load, scalar); + scalar_load_metadata(self, load, scalar); } load }); OperandValue::Immediate(to_immediate(self, llval, ptr.layout)) } else if let layout::Abi::ScalarPair(ref a, ref b) = ptr.layout.abi { - let load = |i, scalar: &layout::Scalar| { + let mut load = |i, scalar: &layout::Scalar| { let llptr = self.struct_gep(ptr.llval, i as u64); let load = self.load(llptr, ptr.align); - scalar_load_metadata(load, scalar); + scalar_load_metadata(self, load, scalar); if scalar.is_bool() { self.trunc(load, self.cx().type_i1()) } else { @@ -595,7 +599,7 @@ impl BuilderMethods<'a, 'll, 
'tcx> - fn range_metadata(&self, load: &'ll Value, range: Range) { + fn range_metadata(&mut self, load: &'ll Value, range: Range) { if self.cx().sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, @@ -618,18 +622,18 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, order: rustc_codegen_ssa::common::AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); @@ -648,7 +652,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn store_with_flags( - &self, + &mut self, val: &'ll Value, ptr: &'ll Value, align: Align, @@ -681,7 +685,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -689,7 +693,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -698,77 +702,77 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Casts */ - fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&mut self, val: 
&'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&mut self, val: &'ll 
Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) @@ -776,14 +780,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } - fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { self.count_insn("intcast"); unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("pointercast"); unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) @@ -791,7 +795,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } /* Comparisons */ - fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); let op = llvm::IntPredicate::from_generic(op); unsafe { @@ -799,7 +803,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -807,14 +811,14 @@ impl BuilderMethods<'a, 'll, 
'tcx> } /* Miscellaneous instructions */ - fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -826,7 +830,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, + fn inline_asm_call(&mut self, asm: *const c_char, cons: *const c_char, inputs: &[&'ll Value], output: &'ll Type, volatile: bool, alignstack: bool, dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { @@ -859,14 +863,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -875,7 +879,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn select( - &self, cond: &'ll Value, + &mut self, cond: &'ll Value, then_val: &'ll Value, else_val: &'ll Value, ) -> &'ll Value { @@ -886,14 +890,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } #[allow(dead_code)] - fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - fn 
extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) @@ -901,7 +905,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn insert_element( - &self, vec: &'ll Value, + &mut self, vec: &'ll Value, elt: &'ll Value, idx: &'ll Value, ) -> &'ll Value { @@ -911,14 +915,14 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64)); @@ -928,7 +932,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -940,7 +944,7 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -952,56 +956,56 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); 
unsafe { let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") } } - fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") } } - fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") } } - fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") } } - fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") } } - fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") } } - fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { 
self.count_insn("vector.reduce.fmax"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0") } } - fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) @@ -1010,7 +1014,7 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) @@ -1019,14 +1023,14 @@ impl BuilderMethods<'a, 'll, 'tcx> instr } } - fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") } } - fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); @@ -1034,7 +1038,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -1042,7 +1046,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn insert_value(&self, agg_val: &'ll Value, elt: 
&'ll Value, + fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -1052,7 +1056,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -1061,27 +1065,27 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&mut self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } - fn cleanup_pad(&self, + fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> &'ll Value { self.count_insn("cleanuppad"); @@ -1097,7 +1101,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn cleanup_ret( - &self, cleanup: &'ll Value, + &mut self, cleanup: &'ll Value, unwind: Option<&'ll BasicBlock>, ) -> &'ll Value { self.count_insn("cleanupret"); @@ -1107,7 +1111,7 @@ impl BuilderMethods<'a, 'll, 'tcx> ret.expect("LLVM does not have support for cleanupret") } - fn catch_pad(&self, + fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> &'ll Value { self.count_insn("catchpad"); @@ -1120,7 +1124,7 @@ impl BuilderMethods<'a, 'll, 'tcx> ret.expect("LLVM does not have support for catchpad") } - fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&mut self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { 
self.count_insn("catchret"); let ret = unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) @@ -1129,7 +1133,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn catch_switch( - &self, + &mut self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, num_handlers: usize, @@ -1144,13 +1148,13 @@ impl BuilderMethods<'a, 'll, 'tcx> ret.expect("LLVM does not have support for catchswitch") } - fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - fn set_personality_fn(&self, personality: &'ll Value) { + fn set_personality_fn(&mut self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } @@ -1158,7 +1162,7 @@ impl BuilderMethods<'a, 'll, 'tcx> // Atomic Operations fn atomic_cmpxchg( - &self, + &mut self, dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, @@ -1180,7 +1184,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } fn atomic_rmw( - &self, + &mut self, op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, @@ -1198,7 +1202,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn atomic_fence( - &self, + &mut self, order: rustc_codegen_ssa::common::AtomicOrdering, scope: rustc_codegen_ssa::common::SynchronizationScope ) { @@ -1211,27 +1215,27 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - fn set_invariant_load(&self, load: &'ll Value) { + fn 
set_invariant_load(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn check_store<'b>(&self, + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); @@ -1250,7 +1254,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } } - fn check_call<'b>(&self, + fn check_call<'b>(&mut self, typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { @@ -1292,15 +1296,15 @@ impl BuilderMethods<'a, 'll, 'tcx> Cow::Owned(casted_args) } - fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } - fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1317,7 +1321,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn call( - &self, + &mut self, llfn: &'ll Value, args: &[&'ll Value], bundle: Option<&rustc_codegen_ssa::common::OperandBundleDef<'ll, &'ll Value>> @@ -1355,7 +1359,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn call_memcpy( - &self, + &mut self, dst: &'ll Value, src: &'ll Value, n_bytes: &'ll Value, @@ -1382,7 +1386,7 @@ impl BuilderMethods<'a, 'll, 'tcx> } fn call_memset( - &self, + &mut self, ptr: &'ll Value, fill_byte: &'ll Value, size: &'ll Value, @@ -1396,14 +1400,14 @@ impl BuilderMethods<'a, 'll, 'tcx> &self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } - fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) 
-> &'ll Value { self.count_insn("zext"); unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) } } - fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("structgep"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -1415,13 +1419,13 @@ impl BuilderMethods<'a, 'll, 'tcx> &self.cx } - fn delete_basic_block(&self, bb: &'ll BasicBlock) { + fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { unsafe { llvm::LLVMDeleteBasicBlock(bb); } } - fn do_not_inline(&self, llret: &'ll Value) { + fn do_not_inline(&mut self, llret: &'ll Value) { llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index d1587ec19963e..d27f20d5b272f 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -23,7 +23,7 @@ use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. 
-pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder<'_, 'll, '_, &'ll Value>) { +pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, 'll, '_, &'ll Value>) { if needs_gdb_debug_scripts_section(bx.cx()) { let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 345a743a7513d..256d6aa8fcf3f 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -162,7 +162,7 @@ impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn declare_local( - &self, + &mut self, dbg_context: &FunctionDebugContext<&'ll DISubprogram>, variable_name: ast::Name, variable_type: Ty<'tcx>, @@ -228,14 +228,14 @@ impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> } fn set_source_location( - &self, + &mut self, debug_context: &FunctionDebugContext<&'ll DISubprogram>, scope: Option<&'ll DIScope>, span: Span, ) { set_source_location(debug_context, &self, scope, span) } - fn insert_reference_to_gdb_debug_scripts_section_global(&self) { + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index c0035740792a0..dfdfd46bcc3b5 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -88,7 +88,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Opti impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { fn codegen_intrinsic_call( - &self, + &mut self, callee_ty: Ty<'tcx>, fn_ty: &FnType<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, &'ll Value>], @@ -126,7 +126,7 @@ 
impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None) } "try" => { - try_intrinsic(bx, cx, + try_intrinsic(self, cx, args[0].immediate(), args[1].immediate(), args[2].immediate(), @@ -185,7 +185,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> // data structures, and the generated code will be awful. (A telltale sign of // this is large quantities of `mov [byte ptr foo],0` in the generated code.) memset_intrinsic( - &self, + self, false, ty, llresult, @@ -216,28 +216,28 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> } "copy_nonoverlapping" => { - copy_intrinsic(&self, false, false, substs.type_at(0), + copy_intrinsic(self, false, false, substs.type_at(0), args[1].immediate(), args[0].immediate(), args[2].immediate()) } "copy" => { - copy_intrinsic(&self, true, false, substs.type_at(0), + copy_intrinsic(self, true, false, substs.type_at(0), args[1].immediate(), args[0].immediate(), args[2].immediate()) } "write_bytes" => { - memset_intrinsic(&self, false, substs.type_at(0), + memset_intrinsic(self, false, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(&self, false, true, substs.type_at(0), + copy_intrinsic(self, false, true, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_memory" => { - copy_intrinsic(&self, true, true, substs.type_at(0), + copy_intrinsic(self, true, true, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_set_memory" => { - memset_intrinsic(&self, true, substs.type_at(0), + memset_intrinsic(self, true, substs.type_at(0), args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_load" | "unaligned_volatile_load" => { @@ -332,8 +332,9 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> 
for Builder<'a, 'll, 'tcx, &'ll Value> args[1].immediate() ], None); let val = &self.extract_value(pair, 0); + let val1 = &self.extract_value(pair, 1); let overflow = &self.zext( - &self.extract_value(pair, 1), + val1, cx.type_bool() ); @@ -412,7 +413,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> } name if name.starts_with("simd_") => { - match generic_simd_intrinsic(&self, name, + match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, @@ -474,8 +475,9 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> failorder, weak); let val = &self.extract_value(pair, 0); + let val1 = &self.extract_value(pair, 1); let success = &self.zext( - &self.extract_value(pair, 1), + val1, &self.cx().type_bool() ); @@ -618,7 +620,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> // arguments to be truncated as needed and pointers to be // cast. fn modify_as_needed<'a, 'll: 'a, 'tcx: 'll>( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, t: &intrinsics::Type, arg: &OperandRef<'tcx, &'ll Value>, ) -> Vec<&'ll Value> { @@ -637,7 +639,8 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> }; let arg = PlaceRef::new_sized(ptr, arg.layout, align); (0..contents.len()).map(|i| { - bx.load_ref(&arg.project_field(bx, i)).immediate() + let field = arg.project_field(bx, i); + bx.load_ref(&field).immediate() }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { @@ -669,7 +672,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let outputs = one(ty_to_type(cx, &intr.output)); let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(&self, t, arg) + modify_as_needed(self, t, arg) }).collect(); assert_eq!(inputs.len(), llargs.len()); @@ -713,7 +716,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll 
Value> } fn copy_intrinsic( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, allow_overlap: bool, volatile: bool, ty: Ty<'tcx>, @@ -738,18 +741,19 @@ fn copy_intrinsic( let dst_ptr = bx.pointercast(dst, cx.type_i8p()); let src_ptr = bx.pointercast(src, cx.type_i8p()); let llfn = cx.get_intrinsic(&name); - + let mul = bx.mul(size, count); + let volatile_const = cx.const_bool(volatile); bx.call(llfn, &[dst_ptr, src_ptr, - bx.mul(size, count), + mul, align, - cx.const_bool(volatile)], + volatile_const], None) } fn memset_intrinsic( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, @@ -761,11 +765,12 @@ fn memset_intrinsic( let size = cx.const_usize(size.bytes()); let align = cx.const_i32(align.abi() as i32); let dst = bx.pointercast(dst, cx.type_i8p()); - bx.call_memset(dst, val, bx.mul(size, count), align, volatile) + let mul = bx.mul(size, count); + bx.call_memset(dst, val, mul, align, volatile) } fn try_intrinsic( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, @@ -791,21 +796,21 @@ fn try_intrinsic( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
fn codegen_msvc_try( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { + let llfn = get_rust_try_fn(cx, &mut |mut bx| { let cx = bx.cx(); bx.set_personality_fn(bx.cx().eh_personality()); let mut normal = bx.build_sibling_block("normal"); - let catchswitch = bx.build_sibling_block("catchswitch"); - let catchpad = bx.build_sibling_block("catchpad"); + let mut catchswitch = bx.build_sibling_block("catchswitch"); + let mut catchpad = bx.build_sibling_block("catchpad"); let mut caught = bx.build_sibling_block("caught"); let func = llvm::get_param(bx.llfn(), 0); @@ -872,10 +877,12 @@ fn codegen_msvc_try( let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); let val1 = cx.const_i32(1); - let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); + let gep1 = catchpad.inbounds_gep(addr, &[val1]); + let arg2 = catchpad.load(gep1, i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); + let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]); catchpad.store(arg1, local_ptr, i64_align); - catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); + catchpad.store(arg2, gep2, i64_align); catchpad.catch_ret(tok, caught.llbb()); caught.ret(cx.const_i32(1)); @@ -900,14 +907,14 @@ fn codegen_msvc_try( // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. 
fn codegen_gnu_try( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { + let llfn = get_rust_try_fn(cx, &mut |mut bx| { let cx = bx.cx(); // Codegens the shims described above: @@ -947,7 +954,8 @@ fn codegen_gnu_try( catch.add_clause(vals, bx.cx().const_null(cx.type_i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; - catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align); + let bitcast = catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())); + catch.store(ptr, bitcast, ptr_align); catch.ret(cx.const_i32(1)); }); @@ -1014,7 +1022,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic( - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, name: &str, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, &'ll Value>], @@ -1190,7 +1198,7 @@ fn generic_simd_intrinsic( in_elem: &::rustc::ty::TyS, in_ty: &::rustc::ty::TyS, in_len: usize, - bx: &Builder<'a, 'll, 'tcx, &'ll Value>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, span: Span, args: &[OperandRef<'tcx, &'ll Value>], ) -> Result<&'ll Value, ()> { diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index db0062d14c953..256efdc11ff4e 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -141,7 +141,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { } pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, lhs: >::Value, rhs: >::Value, t: Ty<'tcx>, @@ -151,7 +151,8 @@ pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tc let signed = match t.sty { ty::Float(_) => { let cmp = 
bin_op_to_fcmp_predicate(op); - return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); + let cmp = bx.fcmp(cmp, lhs, rhs); + return bx.sext(cmp, ret_ty); }, ty::Uint(_) => false, ty::Int(_) => true, @@ -159,11 +160,12 @@ pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tc }; let cmp = bin_op_to_icmp_predicate(op, signed); + let cmp = bx.icmp(cmp, lhs, rhs); // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension // to get the correctly sized type. This will compile to a single instruction // once the IR is converted to assembly if the SIMD instruction is supported // by the target architecture. - bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) + bx.sext(cmp, ret_ty) } /// Retrieve the information we are losing (making dynamic) in an unsizing @@ -203,7 +205,7 @@ pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, src: >::Value, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> @@ -260,14 +262,14 @@ pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx> /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, src: PlaceRef<'tcx, >::Value>, dst: PlaceRef<'tcx, >::Value> ) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let src_ty = src.layout.ty; let dst_ty = dst.layout.ty; - let coerce_ptr = || { + let mut coerce_ptr = || { let (base, info) = match bx.load_ref(&src).val { OperandValue::Pair(base, info) => { // fat-ptr to fat-ptr unsize preserves the vtable @@ -320,31 +322,20 @@ pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' } pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, 
Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, op: hir::BinOpKind, lhs: >::Value, rhs: >::Value ) -> >::Value { - cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b)) + cast_shift_rhs(bx, op, lhs, rhs) } -fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, +fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, op: hir::BinOpKind, lhs: >::Value, rhs: >::Value, - trunc: F, - zext: G -) -> >::Value - where F: FnOnce( - >::Value, - >::Type - ) -> >::Value, - G: FnOnce( - >::Value, - >::Type - ) -> >::Value -{ +) -> >::Value { // Shifts may have any size int on the rhs if op.is_shift() { let mut rhs_llty = bx.cx().val_ty(rhs); @@ -358,11 +349,11 @@ fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, F, G, Bx : BuilderMethods<'a, 'll, 't let rhs_sz = bx.cx().int_width(rhs_llty); let lhs_sz = bx.cx().int_width(lhs_llty); if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) + bx.trunc(rhs, lhs_llty) } else if lhs_sz > rhs_sz { // FIXME (#1877: If shifting by negative // values becomes not undefined then this is wrong. 
- zext(rhs, lhs_llty) + bx.zext(rhs, lhs_llty) } else { rhs } @@ -381,7 +372,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { } pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( - bx: &Bx, + bx: &mut Bx, val: >::Value ) { let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); @@ -389,7 +380,7 @@ pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( } pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( - bx: &Bx, + bx: &mut Bx, val: >::Value ) -> >::Value { if bx.cx().val_ty(val) == bx.cx().type_i1() { @@ -400,7 +391,7 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx> } pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, val: >::Value, layout: layout::TyLayout, ) -> >::Value { @@ -411,7 +402,7 @@ pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( } pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, val: >::Value, scalar: &layout::Scalar, ) -> >::Value { @@ -422,7 +413,7 @@ pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, } pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, dst: >::Value, src: >::Value, layout: TyLayout<'tcx>, @@ -560,7 +551,8 @@ pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, }; let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, cx.type_int(), true)); + let cast = bx.intcast(result, cx.type_int(), true); + bx.ret(cast); } } diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index fa727589d235f..e52da7c0509d8 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -208,7 +208,7 @@ mod temp_stable_hash_impls { // of Java. (See related discussion on #1877 and #10183.) 
pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, lhs: >::Value, rhs: >::Value ) -> >::Value { @@ -219,7 +219,7 @@ pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll } pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, lhs_t: Ty<'tcx>, lhs: >::Value, rhs: >::Value @@ -236,15 +236,16 @@ pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll } fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, rhs: >::Value ) -> >::Value { let rhs_llty = bx.cx().val_ty(rhs); - bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) + let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false); + bx.and(rhs, shift_val) } pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, llty: >::Type, mask_llty: >::Type, invert: bool diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs index 6574bec8d94fa..dca14602ef4a6 100644 --- a/src/librustc_codegen_ssa/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -23,7 +23,7 @@ use interfaces::*; pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx> >( - bx: &Bx, + bx: &mut Bx, t: Ty<'tcx>, info: Option<>::Value> ) -> (>::Value, >::Value) where @@ -101,9 +101,10 @@ pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, // pick the correct alignment statically. 
cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64) } - _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align), - sized_align, - unsized_align) + _ => { + let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align); + bx.select(cmp, sized_align, unsized_align) + } }; // Issue #27023: must add any necessary padding to `size` @@ -116,9 +117,11 @@ pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, // emulated via the semi-standard fast bit trick: // // `(size + (align-1)) & -align` - - let addend = bx.sub(align, bx.cx().const_usize(1)); - let size = bx.and(bx.add(size, addend), bx.neg(align)); + let one = bx.cx().const_usize(1); + let addend = bx.sub(align, one); + let add = bx.add(size, addend); + let neg = bx.neg(align); + let size = bx.and(add, neg); (size, align) } diff --git a/src/librustc_codegen_ssa/interfaces/abi.rs b/src/librustc_codegen_ssa/interfaces/abi.rs index 2eed2e8c9b509..17f4791319f4f 100644 --- a/src/librustc_codegen_ssa/interfaces/abi.rs +++ b/src/librustc_codegen_ssa/interfaces/abi.rs @@ -25,7 +25,7 @@ pub trait AbiMethods<'tcx> { pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn apply_attrs_callsite( - &self, + &mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: >::Value ); diff --git a/src/librustc_codegen_ssa/interfaces/asm.rs b/src/librustc_codegen_ssa/interfaces/asm.rs index 820ea3ce4ac35..b6830b0158abc 100644 --- a/src/librustc_codegen_ssa/interfaces/asm.rs +++ b/src/librustc_codegen_ssa/interfaces/asm.rs @@ -16,7 +16,7 @@ use super::builder::HasCodegen; pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ // Take an inline assembly expression and splat it out via LLVM fn codegen_inline_asm( - &self, + &mut self, ia: &InlineAsm, outputs: Vec>::Value>>, inputs: Vec<>::Value> diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs index 4d45844e08a9e..f050d33617f73 100644 --- 
a/src/librustc_codegen_ssa/interfaces/builder.rs +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -63,170 +63,170 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + else_llbb: >::BasicBlock, ); fn switch( - &self, + &mut self, v: >::Value, else_llbb: >::BasicBlock, num_cases: usize, ) -> >::Value; fn invoke( - &self, + &mut self, llfn: >::Value, args: &[>::Value], then: >::BasicBlock, catch: >::BasicBlock, bundle: Option<&OperandBundleDef<'ll, >::Value>> ) -> >::Value; - fn unreachable(&self); + fn unreachable(&mut self); fn add( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fadd( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fadd_fast( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn sub( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fsub( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fsub_fast( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn mul( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fmul( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fmul_fast( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn udiv( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn exactudiv( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn sdiv( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn exactsdiv( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fdiv( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fdiv_fast( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn urem( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn srem( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn frem( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn frem_fast( - &self, + &mut self, lhs: >::Value, rhs: 
>::Value ) -> >::Value; fn shl( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn lshr( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn ashr( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn and( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn or( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn xor( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn neg( - &self, + &mut self, v: >::Value ) -> >::Value; fn fneg( - &self, + &mut self, v: >::Value ) -> >::Value; fn not( - &self, + &mut self, v: >::Value ) -> >::Value; fn alloca( - &self, + &mut self, ty: >::Type, name: &str, align: Align ) -> >::Value; fn dynamic_alloca( - &self, + &mut self, ty: >::Type, name: &str, align: Align ) -> >::Value; fn array_alloca( - &self, + &mut self, ty: >::Type, len: >::Value, name: &str, @@ -234,42 +234,42 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ) -> >::Value; fn load( - &self, + &mut self, ptr: >::Value, align: Align ) -> >::Value; fn volatile_load( - &self, + &mut self, ptr: >::Value ) -> >::Value; fn atomic_load( - &self, + &mut self, ptr: >::Value, order: AtomicOrdering, align: Align ) -> >::Value; fn load_ref( - &self, + &mut self, &PlaceRef<'tcx,>::Value> ) -> OperandRef<'tcx, >::Value>; - fn range_metadata(&self, load: >::Value, range: Range); - fn nonnull_metadata(&self, load: >::Value); + fn range_metadata(&mut self, load: >::Value, range: Range); + fn nonnull_metadata(&mut self, load: >::Value); fn store( - &self, + &mut self, val: >::Value, ptr: >::Value, align: Align ) -> >::Value; fn atomic_store( - &self, + &mut self, val: >::Value, ptr: >::Value, order: AtomicOrdering, align: Align ); fn store_with_flags( - &self, + &mut self, val: >::Value, ptr: >::Value, align: Align, @@ -277,109 +277,109 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ) -> >::Value; fn gep( - &self, + &mut 
self, ptr: >::Value, indices: &[>::Value] ) -> >::Value; fn inbounds_gep( - &self, + &mut self, ptr: >::Value, indices: &[>::Value] ) -> >::Value; fn struct_gep( - &self, + &mut self, ptr: >::Value, idx: u64 ) -> >::Value; fn trunc( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn sext( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn fptoui( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn fptosi( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn uitofp( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn sitofp( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn fptrunc( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn fpext( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn ptrtoint( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn inttoptr( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn bitcast( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn intcast( - &self, + &mut self, val: >::Value, dest_ty: >::Type, is_signed: bool ) -> >::Value; fn pointercast( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; fn icmp( - &self, + &mut self, op: IntPredicate, lhs: >::Value, rhs: >::Value ) -> >::Value; fn fcmp( - &self, + &mut self, op: RealPredicate, lhs: >::Value, rhs: >::Value ) -> >::Value; fn empty_phi( - &self, + &mut self, ty: >::Type) -> >::Value; fn phi( - &self, + &mut self, ty: >::Type, vals: &[>::Value], bbs: &[>::BasicBlock] ) -> >::Value; fn inline_asm_call( - &self, + &mut self, asm: *const c_char, cons: *const c_char, inputs: &[>::Value], @@ -390,167 +390,167 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ) -> Option<>::Value>; fn minnum( - &self, + &mut self, lhs: >::Value, rhs: >::Value ) -> >::Value; fn maxnum( - &self, + &mut self, lhs: >::Value, rhs: 
>::Value ) -> >::Value; fn select( - &self, cond: >::Value, + &mut self, cond: >::Value, then_val: >::Value, else_val: >::Value, ) -> >::Value; fn va_arg( - &self, + &mut self, list: >::Value, ty: >::Type ) -> >::Value; - fn extract_element(&self, + fn extract_element(&mut self, vec: >::Value, idx: >::Value ) -> >::Value; fn insert_element( - &self, vec: >::Value, + &mut self, vec: >::Value, elt: >::Value, idx: >::Value, ) -> >::Value; fn shuffle_vector( - &self, + &mut self, v1: >::Value, v2: >::Value, mask: >::Value ) -> >::Value; fn vector_splat( - &self, + &mut self, num_elts: usize, elt: >::Value ) -> >::Value; fn vector_reduce_fadd_fast( - &self, + &mut self, acc: >::Value, src: >::Value ) -> >::Value; fn vector_reduce_fmul_fast( - &self, + &mut self, acc: >::Value, src: >::Value ) -> >::Value; fn vector_reduce_add( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_mul( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_and( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_or( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_xor( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_fmin( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_fmax( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_fmin_fast( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_fmax_fast( - &self, + &mut self, src: >::Value ) -> >::Value; fn vector_reduce_min( - &self, + &mut self, src: >::Value, is_signed: bool ) -> >::Value; fn vector_reduce_max( - &self, + &mut self, src: >::Value, is_signed: bool ) -> >::Value; fn extract_value( - &self, + &mut self, agg_val: >::Value, idx: u64 ) -> >::Value; fn insert_value( - &self, + &mut self, agg_val: >::Value, elt: >::Value, idx: u64 ) -> >::Value; fn landing_pad( - &self, + &mut self, ty: >::Type, pers_fn: >::Value, num_clauses: usize ) -> >::Value; fn add_clause( - &self, + &mut self, landing_pad: 
>::Value, clause: >::Value ); fn set_cleanup( - &self, + &mut self, landing_pad: >::Value ); fn resume( - &self, + &mut self, exn: >::Value ) -> >::Value; fn cleanup_pad( - &self, + &mut self, parent: Option<>::Value>, args: &[>::Value] ) -> >::Value; fn cleanup_ret( - &self, cleanup: >::Value, + &mut self, cleanup: >::Value, unwind: Option<>::BasicBlock>, ) -> >::Value; fn catch_pad( - &self, + &mut self, parent: >::Value, args: &[>::Value] ) -> >::Value; fn catch_ret( - &self, + &mut self, pad: >::Value, unwind: >::BasicBlock ) -> >::Value; fn catch_switch( - &self, + &mut self, parent: Option<>::Value>, unwind: Option<>::BasicBlock>, num_handlers: usize, ) -> >::Value; fn add_handler( - &self, + &mut self, catch_switch: >::Value, handler: >::BasicBlock ); - fn set_personality_fn(&self, personality: >::Value); + fn set_personality_fn(&mut self, personality: >::Value); fn atomic_cmpxchg( - &self, + &mut self, dst: >::Value, cmp: >::Value, src: >::Value, @@ -559,45 +559,45 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + weak: bool, ) -> >::Value; fn atomic_rmw( - &self, + &mut self, op: AtomicRmwBinOp, dst: >::Value, src: >::Value, order: AtomicOrdering, ) -> >::Value; - fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope); + fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); fn add_case( - &self, + &mut self, s: >::Value, on_val: >::Value, dest: >::BasicBlock ); fn add_incoming_to_phi( - &self, + &mut self, phi: >::Value, val: >::Value, bb: >::BasicBlock ); - fn set_invariant_load(&self, load: >::Value); + fn set_invariant_load(&mut self, load: >::Value); /// Returns the ptr value that should be used for storing `val`. fn check_store( - &self, + &mut self, val: >::Value, ptr: >::Value ) -> >::Value; /// Returns the args that should be used for a call to `llfn`. 
fn check_call<'b>( - &self, + &mut self, typ: &str, llfn: >::Value, args: &'b [>::Value] ) -> Cow<'b, [>::Value]> where [>::Value] : ToOwned; - fn lifetime_start(&self, ptr: >::Value, size: Size); - fn lifetime_end(&self, ptr: >::Value, size: Size); + fn lifetime_start(&mut self, ptr: >::Value, size: Size); + fn lifetime_end(&mut self, ptr: >::Value, size: Size); /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` @@ -608,20 +608,20 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). fn call_lifetime_intrinsic( - &self, + &mut self, intrinsic: &str, ptr: >::Value, size: Size ); fn call( - &self, + &mut self, llfn: >::Value, args: &[>::Value], bundle: Option<&OperandBundleDef<'ll, >::Value>> ) -> >::Value; fn call_memcpy( - &self, + &mut self, dst: >::Value, src: >::Value, n_bytes: >::Value, @@ -630,7 +630,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ); fn call_memset( - &self, + &mut self, ptr: >::Value, fill_byte: >::Value, size: >::Value, @@ -639,11 +639,11 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ) -> >::Value; fn zext( - &self, + &mut self, val: >::Value, dest_ty: >::Type ) -> >::Value; - fn delete_basic_block(&self, bb: >::BasicBlock); - fn do_not_inline(&self, llret: >::Value); + fn delete_basic_block(&mut self, bb: >::BasicBlock); + fn do_not_inline(&mut self, llret: >::Value); } diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs index 6e01903fd7ab0..35d1b2a51fc5b 100644 --- a/src/librustc_codegen_ssa/interfaces/debuginfo.rs +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -59,7 +59,7 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { pub trait 
DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn declare_local( - &self, + &mut self, dbg_context: &FunctionDebugContext< >::DIScope >, @@ -71,12 +71,12 @@ pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, span: syntax_pos::Span, ); fn set_source_location( - &self, + &mut self, debug_context: &FunctionDebugContext< >::DIScope >, scope: Option<>::DIScope>, span: syntax_pos::Span, ); - fn insert_reference_to_gdb_debug_scripts_section_global(&self); + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); } diff --git a/src/librustc_codegen_ssa/interfaces/intrinsic.rs b/src/librustc_codegen_ssa/interfaces/intrinsic.rs index 8e4b5d64f2eba..e0a5e6dab19aa 100644 --- a/src/librustc_codegen_ssa/interfaces/intrinsic.rs +++ b/src/librustc_codegen_ssa/interfaces/intrinsic.rs @@ -21,7 +21,7 @@ pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tc /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_codegen_llvm/context.rs fn codegen_intrinsic_call( - &self, + &mut self, callee_ty: Ty<'tcx>, fn_ty: &FnType<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, >::Value>], diff --git a/src/librustc_codegen_ssa/interfaces/type_.rs b/src/librustc_codegen_ssa/interfaces/type_.rs index 1b3d729234c4d..6e3e9f3c553ca 100644 --- a/src/librustc_codegen_ssa/interfaces/type_.rs +++ b/src/librustc_codegen_ssa/interfaces/type_.rs @@ -120,12 +120,12 @@ pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn store_fn_arg( - &self, + &mut self, ty: &ArgType<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, >::Value> ); fn store_arg_ty( - &self, + &mut self, ty: &ArgType<'tcx, Ty<'tcx>>, val: >::Value, dst: PlaceRef<'tcx, >::Value> diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index f4597fa829dd6..f2ae6e421fa6c 100644 --- 
a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -31,7 +31,7 @@ impl<'a, 'tcx> VirtualIndex { pub fn get_fn>( self, - bx: &Bx, + bx: &mut Bx, llvtable: >::Value, fn_ty: &FnType<'tcx, Ty<'tcx>> ) -> >::Value { @@ -43,10 +43,8 @@ impl<'a, 'tcx> VirtualIndex { bx.cx().type_ptr_to(bx.cx().type_ptr_to(bx.cx().fn_backend_type(fn_ty))) ); let ptr_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load( - bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]), - ptr_align - ); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, ptr_align); bx.nonnull_metadata(ptr); // Vtable loads are invariant bx.set_invariant_load(ptr); @@ -55,7 +53,7 @@ impl<'a, 'tcx> VirtualIndex { pub fn get_usize>( self, - bx: &Bx, + bx: &mut Bx, llvtable: >::Value ) -> >::Value { // Load the data pointer from the object. @@ -63,10 +61,8 @@ impl<'a, 'tcx> VirtualIndex { let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize())); let usize_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load( - bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]), - usize_align - ); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, usize_align); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index b216379b376d4..8e70f2378351b 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -93,7 +93,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline : Bx = this.new_block(name); + let mut trampoline : Bx = this.new_block(name); trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); trampoline.llbb() } else { @@ -136,9 +136,9 @@ impl<'a, 'f, 
'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = destination { - let ret_bx = this.build_block::(target); - this.set_debug_loc(&ret_bx, terminator.source_info); - this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); + let mut ret_bx = this.build_block::(target); + this.set_debug_loc(&mut ret_bx, terminator.source_info); + this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); @@ -160,16 +160,18 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } }; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { bx.cleanup_ret(cleanup_pad, None); } else { - let slot = self.get_personality_slot(&bx); - let lp0 = bx.load_ref(&slot.project_field(&bx, 0)).immediate(); - let lp1 = bx.load_ref(&slot.project_field(&bx, 1)).immediate(); - slot.storage_dead(&bx); + let slot = self.get_personality_slot(&mut bx); + let pr0 = slot.project_field(&mut bx, 0); + let lp0 = bx.load_ref(&pr0).immediate(); + let pr1 = slot.project_field(&mut bx, 1); + let lp1 = bx.load_ref(&pr1).immediate(); + slot.storage_dead(&mut bx); if !bx.cx().sess().target.target.options.custom_unwind_resume { let mut lp = bx.cx().const_undef(self.landing_pad_type()); @@ -195,7 +197,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let discr = self.codegen_operand(&bx, discr); + let discr = self.codegen_operand(&mut bx, discr); if targets.len() == 2 { // If there are two targets, emit br instead of switch let lltrue = llblock(self, targets[0]); @@ -240,11 +242,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } PassMode::Direct(_) | PassMode::Pair(..) 
=> { - let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE)); + let op = self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE)); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { - op.immediate_or_packed_pair(&bx) + op.immediate_or_packed_pair(&mut bx) } } @@ -262,8 +264,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret"); - op.val.store(&bx, scratch); + let scratch = PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + op.val.store(&mut bx, scratch); scratch.llval } Ref(llval, _, align) => { @@ -272,11 +274,10 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> llval } }; - bx.load( - bx.pointercast(llslot, bx.cx().type_ptr_to( - bx.cx().cast_backend_type(&cast_ty) - )), - self.fn_ty.ret.layout.align) + let addr = bx.pointercast(llslot, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&cast_ty) + )); + bx.load(addr, self.fn_ty.ret.layout.align) } }; bx.ret(llval); @@ -297,7 +298,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> return } - let place = self.codegen_place(&bx, location); + let place = self.codegen_place(&mut bx, location); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; @@ -317,7 +318,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> let fn_ty = bx.cx().new_vtable(sig, &[]); let vtable = args[1]; args = &args[..1]; - (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) + (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) } _ => { (bx.cx().get_fn(drop_fn), @@ -330,7 +331,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.codegen_operand(&bx, cond).immediate(); + let cond = 
self.codegen_operand(&mut bx, cond).immediate(); let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked @@ -367,7 +368,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // After this point, bx is the block for the call to panic. bx = panic_block; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); // Get the location information. let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); @@ -382,8 +383,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { EvalErrorKind::BoundsCheck { ref len, ref index } => { - let len = self.codegen_operand(&bx, len).immediate(); - let index = self.codegen_operand(&bx, index).immediate(); + let len = self.codegen_operand(&mut bx, len).immediate(); + let index = self.codegen_operand(&mut bx, index).immediate(); let file_line_col = bx.cx().const_struct(&[filename, line, col], false); let file_line_col = bx.cx().static_addr_of( @@ -434,7 +435,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> from_hir_call: _ } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. 
- let callee = self.codegen_operand(&bx, func); + let callee = self.codegen_operand(&mut bx, func); let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { @@ -468,7 +469,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if intrinsic == Some("transmute") { if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; - self.codegen_transmute(&bx, &args[0], dest); + self.codegen_transmute(&mut bx, &args[0], dest); funclet_br(self, &mut bx, target); } else { // If we are trying to transmute to an uninhabited type, @@ -559,7 +560,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // Prepare the return value destination let ret_dest = if let Some((ref dest, _)) = *destination { let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs, + self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing @@ -627,7 +628,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } } - self.codegen_operand(&bx, arg) + self.codegen_operand(&mut bx, arg) }).collect(); @@ -636,7 +637,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); + self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { @@ -657,11 +658,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> }; for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&bx, arg); + let mut op = self.codegen_operand(&mut bx, arg); if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { if let Pair(data_ptr, meta) = op.val { llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bx, meta, &fn_ty)); + .get_fn(&mut bx, meta, &fn_ty)); 
llargs.push(data_ptr); continue; } @@ -672,17 +673,17 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&bx, op.layout, "const"); - op.val.store(&bx, tmp); + let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } _ => {} } - self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]); + self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { - self.codegen_arguments_untupled(&bx, tup, &mut llargs, + self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, &fn_ty.args[first_args.len()..]) } @@ -705,7 +706,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn codegen_argument>( &mut self, - bx: &Bx, + bx: &mut Bx, op: OperandRef<'tcx, Cx::Value>, llargs: &mut Vec, arg: &ArgType<'tcx, Ty<'tcx>> @@ -771,9 +772,10 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to( + let addr = bx.pointercast(llval, bx.cx().type_ptr_to( bx.cx().cast_backend_type(&ty)) - ), align.min(arg.layout.align)); + ); + llval = bx.load(addr, align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -796,7 +798,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn codegen_arguments_untupled>( &mut self, - bx: &Bx, + bx: &mut Bx, operand: &mir::Operand<'tcx>, llargs: &mut Vec, args: &[ArgType<'tcx, Ty<'tcx>>] @@ -808,7 +810,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); - self.codegen_argument(bx, bx.load_ref(&field_ptr), llargs, &args[i]); + let load_ref = bx.load_ref(&field_ptr); + self.codegen_argument(bx, load_ref , llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") @@ -823,7 +826,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn get_personality_slot>( &mut self, - bx: &Bx + bx: &mut Bx ) -> PlaceRef<'tcx, Cx::Value> where Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx> { let cx = bx.cx(); if let Some(slot) = self.personality_slot { @@ -871,9 +874,9 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> let lp = bx.landing_pad(llretty, llpersonality, 1); bx.set_cleanup(lp); - let slot = self.get_personality_slot(&bx); - slot.storage_live(&bx); - Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot); + let slot = self.get_personality_slot(&mut bx); + slot.storage_live(&mut bx); + Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot); bx.br(target_bb); bx.llbb() @@ -888,7 +891,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 
'll, Cx: CodegenMethods<'ll, 'tcx>> &mut self ) -> Cx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let bl : Bx = self.new_block("unreachable"); + let mut bl : Bx = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() @@ -910,7 +913,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn make_return_dest>( &mut self, - bx: &Bx, + bx: &mut Bx, dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, llargs: &mut Vec, is_intrinsic: bool @@ -970,7 +973,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn codegen_transmute>( &mut self, - bx: &Bx, + bx: &mut Bx, src: &mir::Operand<'tcx>, dst: &mir::Place<'tcx> ) { @@ -1001,7 +1004,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> fn codegen_transmute_into>( &mut self, - bx: &Bx, + bx: &mut Bx, src: &mir::Operand<'tcx>, dst: PlaceRef<'tcx, Cx::Value> ) { @@ -1016,7 +1019,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // Stores the return value of a function call into it's final location. 
fn store_return>( &mut self, - bx: &Bx, + bx: &mut Bx, dest: ReturnDest<'tcx, Cx::Value>, ret_ty: &ArgType<'tcx, Ty<'tcx>>, llval: Cx::Value diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 6912f78ace25d..a24079467496a 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -119,7 +119,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { pub fn set_debug_loc>( &mut self, - bx: &Bx, + bx: &mut Bx, source_info: mir::SourceInfo ) where Bx::CodegenCx : DebugInfoMethods<'ll, 'tcx, DIScope = Cx::DIScope> { let (scope, span) = self.debug_loc(source_info); @@ -291,11 +291,11 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, @@ -317,10 +317,10 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( debug!("alloc: {:?} -> place", local); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local)); + PlaceRef::alloca_unsized_indirect(&mut bx, layout, &format!("{:?}", local)); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local))) + LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) } } else { // If this is an immediate local, we do not create an @@ -407,7 +407,7 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 
'tcx>>( // bar(); // } Some(&mir::TerminatorKind::Abort) => { - let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); @@ -490,7 +490,8 @@ fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, ' if arg.pad.is_some() { llarg_idx += 1; } - bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i)); + let pr_field = place.project_field(bx, i); + bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } // Now that we have one alloca that contains the aggregate value, diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index fc10330082eac..2ba6b318b58d7 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -77,7 +77,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { } pub fn from_const>( - bx: &Bx, + bx: &mut Bx, val: &'tcx ty::Const<'tcx> ) -> Result, Lrc>> where Bx::CodegenCx : Backend<'ll, Value = V>, @@ -170,7 +170,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { /// For other cases, see `immediate`. pub fn immediate_or_packed_pair>( self, - bx: &Bx + bx: &mut Bx ) -> V where Bx::CodegenCx : Backend<'ll, Value=V> { if let OperandValue::Pair(a, b) = self.val { let llty = bx.cx().backend_type(&self.layout); @@ -178,8 +178,10 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { self, llty); // Reconstruct the immediate aggregate. 
let mut llpair = bx.cx().const_undef(llty); - llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); - llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); + let imm_a = base::from_immediate(bx, a); + let imm_b = base::from_immediate(bx, b); + llpair = bx.insert_value(llpair, imm_a, 0); + llpair = bx.insert_value(llpair, imm_b, 1); llpair } else { self.immediate() @@ -188,7 +190,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. pub fn from_immediate_or_packed_pair>( - bx: &Bx, + bx: &mut Bx, llval: >::Value, layout: TyLayout<'tcx> ) -> OperandRef<'tcx, >::Value> @@ -199,8 +201,10 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { llval, layout); // Deconstruct the immediate aggregate. - let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a); - let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b); + let a_llval = bx.extract_value(llval, 0); + let a_llval = base::to_immediate_scalar(bx, a_llval, a); + let b_llval = bx.extract_value(llval, 1); + let b_llval = base::to_immediate_scalar(bx, b_llval, b); OperandValue::Pair(a_llval, b_llval) } else { OperandValue::Immediate(llval) @@ -209,7 +213,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { } pub fn extract_field>( - &self, bx: &Bx, + &self, bx: &mut Bx, i: usize ) -> OperandRef<'tcx, >::Value> where Bx::CodegenCx : Backend<'ll, Value=V>, @@ -275,7 +279,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { pub fn store>( self, - bx: &Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::empty()); @@ -283,7 +287,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { pub fn volatile_store>( 
self, - bx: &Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::VOLATILE); @@ -291,7 +295,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { pub fn unaligned_volatile_store>( self, - bx: &Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); @@ -299,7 +303,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { pub fn nontemporal_store>( self, - bx: &Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> ) where Bx::CodegenCx : Backend<'ll, Value = V> { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); @@ -307,7 +311,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { fn store_with_flags>( self, - bx: &Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, >::Value>, flags: MemFlags, ) where Bx::CodegenCx : Backend<'ll, Value = V> { @@ -340,7 +344,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { } pub fn store_unsized>( self, - bx: &Bx, + bx: &mut Bx, indirect_dest: PlaceRef<'tcx, V> ) where Bx::CodegenCx : Backend<'ll, Value = V>, @@ -380,7 +384,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> { fn maybe_codegen_consume_direct>( &mut self, - bx: &Bx, + bx: &mut Bx, place: &mir::Place<'tcx> ) -> Option> where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> @@ -430,7 +434,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> pub fn codegen_consume>( &mut self, - bx: &Bx, + bx: &mut Bx, place: &mir::Place<'tcx> ) -> OperandRef<'tcx, Cx::Value> where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> @@ -451,12 +455,13 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> // for most places, to consume them we just load them // out from their home - 
bx.load_ref(&self.codegen_place(bx, place)) + let addr = self.codegen_place(bx, place); + bx.load_ref(&addr) } pub fn codegen_operand>( &mut self, - bx: &Bx, + bx: &mut Bx, operand: &mir::Operand<'tcx> ) -> OperandRef<'tcx, Cx::Value> where Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx>, diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index f893796deab6a..6fa1a2a6e28cb 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -51,7 +51,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { } pub fn alloca>( - bx: &Bx, + bx: &mut Bx, layout: TyLayout<'tcx>, name: &str ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V> { @@ -63,7 +63,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// Returns a place for an indirect reference to an unsized place. pub fn alloca_unsized_indirect>( - bx: &Bx, + bx: &mut Bx, layout: TyLayout<'tcx>, name: &str ) -> PlaceRef<'tcx, V> @@ -98,7 +98,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. pub fn project_field>( - self, bx: &Bx, + self, bx: &mut Bx, ix: usize ) -> PlaceRef<'tcx, >::Value> where @@ -110,7 +110,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); - let simple = || { + let mut simple = || { // Unions and newtypes only use an offset of 0. 
let llval = if offset.bytes() == 0 { self.llval @@ -186,8 +186,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { // Calculate offset let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64)); - let offset = bx.and(bx.add(unaligned_offset, align_sub_1), - bx.neg(unsized_align)); + let and_lhs = bx.add(unaligned_offset, align_sub_1); + let and_rhs = bx.neg(unsized_align); + let offset = bx.and(and_lhs, and_rhs); debug!("struct_field_ptr: DST field offset: {:?}", offset); @@ -210,7 +211,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// Obtain the actual discriminant of a value. pub fn codegen_get_discr>( self, - bx: &Bx, + bx: &mut Bx, cast_to: Ty<'tcx> ) -> V where Bx::CodegenCx : Backend<'ll, Value = V>, @@ -261,7 +262,8 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { } else { bx.cx().const_uint_big(niche_llty, niche_start) }; - bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), + let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); + bx.select(select_arg, bx.cx().const_uint(cast_to, *niche_variants.start() as u64), bx.cx().const_uint(cast_to, dataful_variant as u64)) } else { @@ -269,8 +271,10 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); let lldiscr_max = bx.cx().const_uint(niche_llty, *niche_variants.end() as u64); - bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), - bx.intcast(lldiscr, cast_to, false), + let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); + let cast = bx.intcast(lldiscr, cast_to, false); + bx.select(select_arg, + cast, bx.cx().const_uint(cast_to, dataful_variant as u64)) } } @@ -281,7 +285,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// representation. 
pub fn codegen_set_discr>( &self, - bx: &Bx, + bx: &mut Bx, variant_index: usize ) where Bx::CodegenCx : Backend<'ll, Value=V>, @@ -347,7 +351,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn project_index>( &self, - bx: &Bx, + bx: &mut Bx, llindex: V ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V>, @@ -363,7 +367,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn project_downcast>( &self, - bx: &Bx, + bx: &mut Bx, variant_index: usize ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V>, @@ -381,13 +385,13 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { } impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { - pub fn storage_live>(&self, bx: &Bx) + pub fn storage_live>(&self, bx: &mut Bx) where Bx::CodegenCx : Backend<'ll, Value = V> { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead>(&self, bx: &Bx) + pub fn storage_dead>(&self, bx: &mut Bx) where Bx::CodegenCx : Backend<'ll, Value = V> { bx.lifetime_end(self.llval, self.layout.size); @@ -400,7 +404,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { pub fn codegen_place>( &mut self, - bx: &Bx, + bx: &mut Bx, place: &mir::Place<'tcx> ) -> PlaceRef<'tcx, Cx::Value> { debug!("codegen_place(place={:?})", place); diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 9859804cd0340..da28249f1ad73 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -42,10 +42,10 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); + let cg_operand = self.codegen_operand(&mut bx, operand); // FIXME: consider not copying constants through 
stack. (fixable by codegenning // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - cg_operand.val.store(&bx, dest); + cg_operand.val.store(&mut bx, dest); bx } @@ -55,8 +55,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> if bx.cx().is_backend_scalar_pair(&dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); return bx; } @@ -64,7 +64,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); match operand.val { OperandValue::Pair(..) | OperandValue::Immediate(_) => { @@ -75,15 +75,15 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> // index into the struct, and this case isn't // important enough for it. 
debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp"); - scratch.storage_live(&bx); - operand.val.store(&bx, scratch); - base::coerce_unsized_into(&bx, scratch, dest); - scratch.storage_dead(&bx); + let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + scratch.storage_live(&mut bx); + operand.val.store(&mut bx, scratch); + base::coerce_unsized_into(&mut bx, scratch, dest); + scratch.storage_dead(&mut bx); } OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized(llref, operand.layout, align); - base::coerce_unsized_into(&bx, source, dest); + base::coerce_unsized_into(&mut bx, source, dest); } OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue") @@ -93,14 +93,14 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } mir::Rvalue::Repeat(ref elem, count) => { - let cg_elem = self.codegen_operand(&bx, elem); + let cg_elem = self.codegen_operand(&mut bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. 
if dest.layout.is_zst() { return bx; } - - let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval; + let zero = bx.cx().const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { let align = bx.cx().const_i32(dest.align.abi() as i32); @@ -114,7 +114,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } // Use llvm.memset.p0i8.* to initialize byte arrays - let v = base::from_immediate(&bx, v); + let v = base::from_immediate(&mut bx, v); if bx.cx().val_ty(v) == bx.cx().type_i8() { bx.call_memset(start, v, size, align, false); return bx; @@ -122,7 +122,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } let count = bx.cx().const_usize(count); - let end = dest.project_index(&bx, count).llval; + let end = dest.project_index(&mut bx, count).llval; let mut header_bx = bx.build_sibling_block("repeat_loop_header"); let mut body_bx = bx.build_sibling_block("repeat_loop_body"); @@ -134,7 +134,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - cg_elem.val.store(&body_bx, + cg_elem.val.store(&mut body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); @@ -147,9 +147,9 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - dest.codegen_set_discr(&bx, variant_index); + dest.codegen_set_discr(&mut bx, variant_index); if adt_def.is_enum() { - (dest.project_downcast(&bx, variant_index), active_field_index) + (dest.project_downcast(&mut bx, variant_index), active_field_index) } else { (dest, active_field_index) } @@ 
-157,11 +157,12 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> _ => (dest, None) }; for (i, operand) in operands.iter().enumerate() { - let op = self.codegen_operand(&bx, operand); + let op = self.codegen_operand(&mut bx, operand); // Do not generate stores and GEPis for zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - op.val.store(&bx, dest.project_field(&bx, field_index)); + let field = dest.project_field(&mut bx, field_index); + op.val.store(&mut bx, field); } } bx @@ -169,15 +170,15 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> _ => { assert!(self.rvalue_creates_operand(rvalue)); - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); bx } } } pub fn codegen_rvalue_unsized>(&mut self, - bx: Bx, + mut bx: Bx, indirect_dest: PlaceRef<'tcx, Cx::Value>, rvalue: &mir::Rvalue<'tcx>) -> Bx @@ -187,8 +188,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); - cg_operand.val.store_unsized(&bx, indirect_dest); + let cg_operand = self.codegen_operand(&mut bx, operand); + cg_operand.val.store_unsized(&mut bx, indirect_dest); bx } @@ -198,14 +199,14 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> pub fn codegen_rvalue_operand>( &mut self, - bx: Bx, + mut bx: Bx, rvalue: &mir::Rvalue<'tcx> ) -> (Bx, OperandRef<'tcx, Cx::Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); let cast = 
bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); @@ -258,7 +259,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata, + let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata, operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } @@ -332,12 +333,13 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. - - base::call_assume(&bx, bx.icmp( + let ll_t_in_const = bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + let cmp = bx.icmp( IntPredicate::IntULE, llval, - bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()) - )); + ll_t_in_const + ); + base::call_assume(&mut bx, cmp); } } } @@ -369,11 +371,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => - cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out), + cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(IntTy::I)) => - cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => - cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out), _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) @@ -386,7 +388,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } mir::Rvalue::Ref(_, bk, ref place) => { - let cg_place = self.codegen_place(&bx, place); + let cg_place = self.codegen_place(&mut bx, place); let ty = cg_place.layout.ty; @@ -407,7 +409,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } 
mir::Rvalue::Len(ref place) => { - let size = self.evaluate_array_len(&bx, place); + let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), layout: bx.cx().layout_of(bx.tcx().types.usize), @@ -416,12 +418,12 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); let llresult = match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.codegen_fat_ptr_binop(&bx, op, + self.codegen_fat_ptr_binop(&mut bx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.layout.ty) @@ -429,7 +431,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!() @@ -442,9 +444,9 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> (bx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); - let result = self.codegen_scalar_checked_binop(&bx, op, + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); + let result = self.codegen_scalar_checked_binop(&mut bx, op, lhs.immediate(), rhs.immediate(), lhs.layout.ty); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); @@ -458,7 +460,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); 
let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_fp(); let llval = match op { @@ -477,8 +479,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(&*self.mir, bx.tcx()); - let discr = self.codegen_place(&bx, place) - .codegen_get_discr(&bx, discr_ty); + let discr = self.codegen_place(&mut bx, place) + .codegen_get_discr(&mut bx, discr_ty); (bx, OperandRef { val: OperandValue::Immediate(discr), layout: self.cx.layout_of(discr_ty) @@ -512,7 +514,8 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> }; let instance = ty::Instance::mono(bx.tcx(), def_id); let r = bx.cx().get_fn(instance); - let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); + let call = bx.call(r, &[llsize, llalign], None); + let val = bx.pointercast(call, llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), @@ -521,7 +524,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> (bx, operand) } mir::Rvalue::Use(ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); (bx, operand) } mir::Rvalue::Repeat(..) 
| @@ -537,7 +540,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> fn evaluate_array_len>( &mut self, - bx: &Bx, + bx: &mut Bx, place: &mir::Place<'tcx>, ) -> Cx::Value { // ZST are passed as operands and require special handling @@ -557,7 +560,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> pub fn codegen_scalar_binop>( &mut self, - bx: &Bx, + bx: &mut Bx, op: mir::BinOp, lhs: Cx::Value, rhs: Cx::Value, @@ -625,7 +628,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> pub fn codegen_fat_ptr_binop>( &mut self, - bx: &Bx, + bx: &mut Bx, op: mir::BinOp, lhs_addr: Cx::Value, lhs_extra: Cx::Value, @@ -635,16 +638,14 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> ) -> Cx::Value { match op { mir::BinOp::Eq => { - bx.and( - bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr), - bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra); + bx.and(lhs, rhs) } mir::BinOp::Ne => { - bx.or( - bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr), - bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra); + bx.or(lhs, rhs) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { @@ -656,14 +657,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT), _ => bug!(), }; - - bx.or( - bx.icmp(strict_op, lhs_addr, rhs_addr), - bx.and( - bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr), - bx.icmp(op, lhs_extra, rhs_extra) - ) - ) + let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr); + let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let and_rhs = bx.icmp(op, lhs_extra, rhs_extra); + let rhs = bx.and(and_lhs, and_rhs); + 
bx.or(lhs, rhs) } _ => { bug!("unexpected fat ptr binop"); @@ -673,7 +671,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> pub fn codegen_scalar_checked_binop>( &mut self, - bx: &Bx, + bx: &mut Bx, op: mir::BinOp, lhs: Cx::Value, rhs: Cx::Value, @@ -758,7 +756,7 @@ enum OverflowOp { fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( oop: OverflowOp, - bx: &Bx, + bx: &mut Bx, ty: Ty ) -> >::Value { use syntax::ast::IntTy::*; @@ -826,7 +824,7 @@ fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 't } fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, signed: bool, x: >::Value, int_ty: >::Type, @@ -849,7 +847,8 @@ fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( let overflow = bx.icmp(IntPredicate::IntUGE, x, max); let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); let infinity = bx.bitcast(infinity_bits, float_ty); - bx.select(overflow, infinity, bx.uitofp(x, float_ty)) + let fp = bx.uitofp(x, float_ty); + bx.select(overflow, infinity, fp) } else { if signed { bx.sitofp(x, float_ty) @@ -860,7 +859,7 @@ fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( } fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( - bx: &Bx, + bx: &mut Bx, signed: bool, x: >::Value, float_ty: >::Type, @@ -875,6 +874,9 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } + + let int_width = bx.cx().int_width(int_ty); + let float_width = bx.cx().float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. 
This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -894,50 +896,50 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. - let int_max = |signed: bool, int_ty: >::Type| -> u128 { - let shift_amount = 128 - bx.cx().int_width(int_ty); + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } }; - let int_min = |signed: bool, int_ty: >::Type| -> i128 { + let int_min = |signed: bool, int_width: u64| -> i128 { if signed { - i128::MIN >> (128 - bx.cx().int_width(int_ty)) + i128::MIN >> (128 - int_width) } else { 0 } }; let compute_clamp_bounds_single = - |signed: bool, int_ty: >::Type| -> (u128, u128) { - let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero); + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); - let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero); + let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); assert!(rounded_max.value.is_finite()); (rounded_min.value.to_bits(), rounded_max.value.to_bits()) }; let compute_clamp_bounds_double = - |signed: bool, int_ty: >::Type| -> (u128, u128) { - let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero); + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); assert_eq!(rounded_min.status, 
Status::OK); - let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero); + let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); assert!(rounded_max.value.is_finite()); (rounded_min.value.to_bits(), rounded_max.value.to_bits()) }; - let float_bits_to_llval = |bits| { - let bits_llval = match bx.cx().float_width(float_ty) { + let mut float_bits_to_llval = |bits| { + let bits_llval = match float_width { 32 => bx.cx().const_u32(bits as u32), 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; bx.bitcast(bits_llval, float_ty) }; - let (f_min, f_max) = match bx.cx().float_width(float_ty) { - 32 => compute_clamp_bounds_single(signed, int_ty), - 64 => compute_clamp_bounds_double(signed, int_ty), + let (f_min, f_max) = match float_width { + 32 => compute_clamp_bounds_single(signed, int_width), + 64 => compute_clamp_bounds_double(signed, int_width), n => bug!("unsupported float width {}", n), }; let f_min = float_bits_to_llval(f_min); @@ -985,8 +987,8 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); - let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128); + let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -995,7 +997,9 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( // Therefore we only need to execute this step for signed integer types. 
if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0)) + let zero = bx.cx().const_uint(int_ty, 0); + let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); + bx.select(cmp, s1, zero) } else { s1 } diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs index 623962c20ad88..76e0595ec0aed 100644 --- a/src/librustc_codegen_ssa/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -24,12 +24,12 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> { pub fn codegen_statement>( &mut self, - bx: Bx, + mut bx: Bx, statement: &mir::Statement<'tcx> ) -> Bx { debug!("codegen_statement(statement={:?})", statement); - self.set_debug_loc(&bx, statement.source_info); + self.set_debug_loc(&mut bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref place, ref rvalue) => { if let mir::Place::Local(index) = *place { @@ -58,39 +58,39 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } } } else { - let cg_dest = self.codegen_place(&bx, place); + let cg_dest = self.codegen_place(&mut bx, place); self.codegen_rvalue(bx, cg_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref place, variant_index} => { - self.codegen_place(&bx, place) - .codegen_set_discr(&bx, variant_index); + self.codegen_place(&mut bx, place) + .codegen_set_discr(&mut bx, variant_index); bx } mir::StatementKind::StorageLive(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_live(&bx); + cg_place.storage_live(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_live(&bx); + cg_indirect_place.storage_live(&mut bx); } bx } mir::StatementKind::StorageDead(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_dead(&bx); + cg_place.storage_dead(&mut bx); } else if let 
LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_dead(&bx); + cg_indirect_place.storage_dead(&mut bx); } bx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { - self.codegen_place(&bx, output) + self.codegen_place(&mut bx, output) }).collect(); let input_vals = inputs.iter() .try_fold(Vec::with_capacity(inputs.len()), |mut acc, input| { - let op = self.codegen_operand(&bx, input); + let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); Ok(acc) From afd940a54ae0ff6dab82ead6bc7c315b279598d3 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 9 Oct 2018 16:01:02 +0200 Subject: [PATCH 73/76] Added default impl for DerivedTypeMethods + empty impl for Cranelift BaseTypeMethods --- src/librustc_codegen_llvm/abi.rs | 12 +- src/librustc_codegen_llvm/context.rs | 2 +- src/librustc_codegen_llvm/debuginfo/gdb.rs | 4 +- src/librustc_codegen_llvm/type_.rs | 134 +--------------- src/librustc_codegen_ssa/base.rs | 56 +++++-- src/librustc_codegen_ssa/callee.rs | 9 +- src/librustc_codegen_ssa/common.rs | 25 ++- src/librustc_codegen_ssa/interfaces/abi.rs | 6 +- src/librustc_codegen_ssa/interfaces/asm.rs | 7 +- .../interfaces/builder.rs | 19 ++- .../interfaces/debuginfo.rs | 6 +- .../interfaces/intrinsic.rs | 7 +- src/librustc_codegen_ssa/interfaces/mod.rs | 11 +- src/librustc_codegen_ssa/interfaces/type_.rs | 147 ++++++++++++++---- src/librustc_codegen_ssa/meth.rs | 12 +- src/librustc_codegen_ssa/mir/analyze.rs | 12 +- src/librustc_codegen_ssa/mir/block.rs | 13 +- src/librustc_codegen_ssa/mir/constant.rs | 55 +------ src/librustc_codegen_ssa/mir/mod.rs | 31 ++-- src/librustc_codegen_ssa/mir/operand.rs | 40 +++-- src/librustc_codegen_ssa/mir/place.rs | 18 ++- src/librustc_codegen_ssa/mir/rvalue.rs | 21 ++- src/librustc_codegen_ssa/mir/statement.rs | 2 +- 23 files changed, 353 insertions(+), 296 deletions(-)
diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 60644de100ce8..3ff9d81a2d95f 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -325,7 +325,11 @@ pub trait FnTypeExt<'tcx> { fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value); + fn apply_attrs_callsite( + &self, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + callsite: &'ll Value + ); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { @@ -728,7 +732,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value) { + fn apply_attrs_callsite( + &self, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + callsite: &'ll Value + ) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index f38307eeb9357..024b3613b6075 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -470,7 +470,7 @@ impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } -impl<'ll, 'tcx: 'll> CodegenMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} +impl<'a, 'll: 'a, 'tcx: 'll> CodegenMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} impl IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> { fn get_intrinsic(&self, key: &str) -> &'b Value { diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index d27f20d5b272f..d617ad2b8e112 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -23,7 +23,9 @@ use syntax::attr; /// 
Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, 'll, '_, &'ll Value>) { +pub fn insert_reference_to_gdb_debug_scripts_section_global( + bx: &mut Builder<'_, 'll, '_, &'ll Value> +) { if needs_gdb_debug_scripts_section(bx.cx()) { let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 8582ab0077439..4178b398e13ee 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -18,16 +18,12 @@ use context::CodegenCx; use value::Value; use rustc_codegen_ssa::interfaces::*; - -use syntax::ast; -use rustc::ty::layout::{self, Align, Size}; use rustc::util::nodemap::FxHashMap; -use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::{Ty, TyCtxt}; use rustc::ty::layout::TyLayout; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; use common; -use rustc_codegen_ssa; use rustc_codegen_ssa::common::TypeKind; use type_of::LayoutLlvmExt; use abi::{LlvmType, FnTypeExt}; @@ -111,6 +107,10 @@ impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } + fn type_isize(&self) -> &'ll Type { + self.isize_ty + } + fn type_f32(&self) -> &'ll Type { unsafe { llvm::LLVMFloatTypeInContext(&self.llcx) @@ -279,126 +279,8 @@ impl Type { } } -impl DerivedTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - - fn type_bool(&self) -> &'ll Type { - &self.type_i8() - } - - fn type_char(&self) -> &'ll Type { - &self.type_i32() - } - - fn type_i8p(&self) -> &'ll Type { - &self.type_ptr_to(&self.type_i8()) - } - - fn type_isize(&self) -> &'ll Type { - &self.isize_ty - } - - fn type_int(&self) -> &'ll Type { - match 
&self.sess().target.target.target_c_int_width[..] { - "16" => &self.type_i16(), - "32" => &self.type_i32(), - "64" => &self.type_i64(), - width => bug!("Unsupported target_c_int_width: {}", width), - } - } - - fn type_int_from_ty( - &self, - t: ast::IntTy - ) -> &'ll Type { - match t { - ast::IntTy::Isize => &self.isize_ty, - ast::IntTy::I8 => &self.type_i8(), - ast::IntTy::I16 => &self.type_i16(), - ast::IntTy::I32 => &self.type_i32(), - ast::IntTy::I64 => &self.type_i64(), - ast::IntTy::I128 => &self.type_i128(), - } - } - - fn type_uint_from_ty( - &self, - t: ast::UintTy - ) -> &'ll Type { - match t { - ast::UintTy::Usize => &self.isize_ty, - ast::UintTy::U8 => &self.type_i8(), - ast::UintTy::U16 => &self.type_i16(), - ast::UintTy::U32 => &self.type_i32(), - ast::UintTy::U64 => &self.type_i64(), - ast::UintTy::U128 => &self.type_i128(), - } - } - - fn type_float_from_ty( - &self, - t: ast::FloatTy - ) -> &'ll Type { - match t { - ast::FloatTy::F32 => &self.type_f32(), - ast::FloatTy::F64 => &self.type_f64(), - } - } - - fn type_from_integer(&self, i: layout::Integer) -> &'ll Type { - use rustc::ty::layout::Integer::*; - match i { - I8 => &self.type_i8(), - I16 => &self.type_i16(), - I32 => &self.type_i32(), - I64 => &self.type_i64(), - I128 => &self.type_i128(), - } - } - - fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. 
- let ity = layout::Integer::approximate_abi_align(self, align); - &self.type_from_integer(ity) - } - - fn type_padding_filler( - &self, - size: Size, - align: Align - ) -> &'ll Type { - let unit = layout::Integer::approximate_abi_align(self, align); - let size = size.bytes(); - let unit_size = unit.size().bytes(); - assert_eq!(size % unit_size, 0); - &self.type_array(&self.type_from_integer(unit), size / unit_size) - } - - fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_ssa::common::type_needs_drop(*self.tcx(), ty) - } - - fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_ssa::common::type_is_sized(*self.tcx(), ty) - } - - fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - rustc_codegen_ssa::common::type_is_freeze(*self.tcx(), ty) - } - - fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - use syntax_pos::DUMMY_SP; - if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { - return false; - } - - let tail = self.tcx().struct_tail(ty); - match tail.sty { - ty::Foreign(..) => false, - ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), - } - } -} +impl<'a, 'll: 'a, 'tcx: 'll> DerivedTypeMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> + {} impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { @@ -435,4 +317,4 @@ impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { } } -impl TypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} +impl<'a, 'll: 'a, 'tcx: 'll> TypeMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 256efdc11ff4e..cbe41065f75c8 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -72,14 +72,18 @@ use mir::operand::OperandValue; use std::marker::PhantomData; -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { +pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ cx: &'a Cx, name: Option, istart: usize, phantom: PhantomData<(&'ll (), &'tcx ())> } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> { +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ pub fn new(cx: &'a Cx, name: String) -> Self { let istart = cx.stats().borrow().n_llvm_insns; StatRecorder { @@ -91,8 +95,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> StatRecorder<'a } } -impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> Drop for +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> Drop for StatRecorder<'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn drop(&mut self) { if self.cx.sess().codegen_stats() 
{ @@ -147,7 +152,9 @@ pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tc t: Ty<'tcx>, ret_ty: >::Type, op: hir::BinOpKind -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let signed = match t.sty { ty::Float(_) => { let cmp = bin_op_to_fcmp_predicate(op); @@ -174,7 +181,7 @@ pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tc /// The `old_info` argument is a bit funny. It is intended for use /// in an upcast, where the new vtable for an object will be derived /// from the old one. -pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( +pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( cx: &'a Cx, source: Ty<'tcx>, target: Ty<'tcx>, @@ -326,7 +333,9 @@ pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, op: hir::BinOpKind, lhs: >::Value, rhs: >::Value -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ cast_shift_rhs(bx, op, lhs, rhs) } @@ -335,7 +344,9 @@ fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( op: hir::BinOpKind, lhs: >::Value, rhs: >::Value, -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ // Shifts may have any size int on the rhs if op.is_shift() { let mut rhs_llty = bx.cx().val_ty(rhs); @@ -374,7 +385,9 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( bx: &mut Bx, val: >::Value -) { +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); bx.call(assume_intrinsic, &[val], None); } @@ -382,7 +395,9 @@ pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( pub fn 
from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( bx: &mut Bx, val: >::Value -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { @@ -394,7 +409,9 @@ pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( bx: &mut Bx, val: >::Value, layout: layout::TyLayout, -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ if let layout::Abi::Scalar(ref scalar) = layout.abi { return to_immediate_scalar(bx, val, scalar); } @@ -405,7 +422,9 @@ pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, bx: &mut Bx, val: >::Value, scalar: &layout::Scalar, -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ if scalar.is_bool() { return bx.trunc(val, bx.cx().type_i1()); } @@ -419,7 +438,9 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( layout: TyLayout<'tcx>, align: Align, flags: MemFlags, -) { +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let size = layout.size.bytes(); if size == 0 { return; @@ -467,7 +488,9 @@ pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx /// users main function. 
pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( cx: &'a Bx::CodegenCx -) { +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { Some((id, span, _)) => { (cx.tcx().hir.local_def_id(id), span) @@ -498,7 +521,10 @@ pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, rust_main: >::Value, rust_main_def_id: DefId, use_start_lang_item: bool, - ) { + ) + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let llfty = cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); @@ -551,7 +577,7 @@ pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, }; let result = bx.call(start_fn, &args, None); - let cast = bx.intcast(result, cx.type_int(), true); + let cast = bx.intcast(result, cx.type_int(), true); bx.ret(cast); } } diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs index a69058f2accf5..52359b334dc58 100644 --- a/src/librustc_codegen_ssa/callee.rs +++ b/src/librustc_codegen_ssa/callee.rs @@ -9,15 +9,18 @@ // except according to those terms. 
use interfaces::*; -use rustc::ty; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::subst::Substs; use rustc::hir::def_id::DefId; -pub fn resolve_and_get_fn<'ll, 'tcx: 'll, Cx : CodegenMethods<'ll, 'tcx>>( +pub fn resolve_and_get_fn<'a, 'll: 'a, 'tcx: 'll, Cx : 'a + CodegenMethods<'a, 'll, 'tcx>>( cx: &Cx, def_id: DefId, substs: &'tcx Substs<'tcx>, -) -> Cx::Value { +) -> Cx::Value + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ cx.get_fn( ty::Instance::resolve( *cx.tcx(), diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index e52da7c0509d8..8493f57c51d71 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -13,6 +13,7 @@ use rustc::ty::{self, Ty, TyCtxt}; use syntax_pos::{DUMMY_SP, Span}; use rustc::hir::def_id::DefId; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::middle::lang_items::LangItem; use base; use interfaces::*; @@ -211,7 +212,9 @@ pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll bx: &mut Bx, lhs: >::Value, rhs: >::Value -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); @@ -223,7 +226,9 @@ pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll lhs_t: Ty<'tcx>, lhs: >::Value, rhs: >::Value -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bx, rhs); @@ -238,7 +243,9 @@ pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, 
Bx: BuilderMethods<'a, 'll, 'tcx>>( bx: &mut Bx, rhs: >::Value -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let rhs_llty = bx.cx().val_ty(rhs); let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false); bx.and(rhs, shift_val) @@ -249,7 +256,9 @@ pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> llty: >::Type, mask_llty: >::Type, invert: bool -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let kind = bx.cx().type_kind(llty); match kind { TypeKind::Integer => { @@ -274,10 +283,12 @@ pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>> } } -pub fn ty_fn_sig<'ll, 'tcx:'ll, Cx: CodegenMethods<'ll, 'tcx>>( - cx: &Cx, +pub fn ty_fn_sig<'a, 'll: 'a, 'tcx:'ll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( + cx: &'a Cx, ty: Ty<'tcx> -) -> ty::PolyFnSig<'tcx> { +) -> ty::PolyFnSig<'tcx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ match ty.sty { ty::FnDef(..) | // Shims currently have type FnPtr. Not sure this should remain. 
diff --git a/src/librustc_codegen_ssa/interfaces/abi.rs b/src/librustc_codegen_ssa/interfaces/abi.rs index 17f4791319f4f..3acd0e10c308c 100644 --- a/src/librustc_codegen_ssa/interfaces/abi.rs +++ b/src/librustc_codegen_ssa/interfaces/abi.rs @@ -10,6 +10,7 @@ use rustc_target::abi::call::FnType; use rustc::ty::{FnSig, Ty, Instance}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use super::Backend; use super::builder::HasCodegen; @@ -23,7 +24,10 @@ pub trait AbiMethods<'tcx> { fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>; } -pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { +pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fn apply_attrs_callsite( &mut self, ty: &FnType<'tcx, Ty<'tcx>>, diff --git a/src/librustc_codegen_ssa/interfaces/asm.rs b/src/librustc_codegen_ssa/interfaces/asm.rs index b6830b0158abc..f7dd5a7747d13 100644 --- a/src/librustc_codegen_ssa/interfaces/asm.rs +++ b/src/librustc_codegen_ssa/interfaces/asm.rs @@ -9,11 +9,16 @@ // except according to those terms. 
use rustc::hir::{InlineAsm, GlobalAsm}; +use rustc::ty::Ty; +use rustc::ty::layout::{TyLayout, LayoutOf, HasTyCtxt}; use mir::place::PlaceRef; use super::Backend; use super::builder::HasCodegen; -pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx>{ +pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ // Take an inline assembly expression and splat it out via LLVM fn codegen_inline_asm( &mut self, diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs index f050d33617f73..1b2cea7775d6a 100644 --- a/src/librustc_codegen_ssa/interfaces/builder.rs +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -11,8 +11,8 @@ use common::{IntPredicate, RealPredicate, AtomicOrdering, SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; use libc::c_char; -use rustc::ty::TyCtxt; -use rustc::ty::layout::{Align, Size}; +use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::layout::{Align, Size, TyLayout, HasTyCtxt, LayoutOf}; use MemFlags; use super::Backend; use super::CodegenMethods; @@ -28,14 +28,19 @@ use std::borrow::Cow; use std::ops::Range; use syntax::ast::AsmDialect; -pub trait HasCodegen<'a, 'll: 'a, 'tcx :'ll> { - type CodegenCx : 'a + CodegenMethods<'ll, 'tcx>; +pub trait HasCodegen<'a, 'll: 'a, 'tcx :'ll> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + type CodegenCx : 'a + CodegenMethods<'a, 'll, 'tcx>; } pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + DebugInfoBuilderMethods<'a, 'll, 'tcx> + ArgTypeMethods<'a, 'll, 'tcx> + AbiBuilderMethods<'a, 'll, 'tcx> + IntrinsicCallMethods<'a, 'll, 'tcx> + AsmBuilderMethods<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn new_block<'b>( cx: &'a Self::CodegenCx, @@ -252,7 +257,11 @@ pub trait 
BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + &PlaceRef<'tcx,>::Value> ) -> OperandRef<'tcx, >::Value>; - fn range_metadata(&mut self, load: >::Value, range: Range); + fn range_metadata( + &mut self, + load: >::Value, + range: Range + ); fn nonnull_metadata(&mut self, load: >::Value); fn store( diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs index 35d1b2a51fc5b..1cd24560f689f 100644 --- a/src/librustc_codegen_ssa/interfaces/debuginfo.rs +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -9,6 +9,7 @@ // except according to those terms. use rustc::ty::{Ty, FnSig}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use super::Backend; use super::builder::HasCodegen; use rustc::mir; @@ -57,7 +58,10 @@ pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4]; } -pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { +pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fn declare_local( &mut self, dbg_context: &FunctionDebugContext< diff --git a/src/librustc_codegen_ssa/interfaces/intrinsic.rs b/src/librustc_codegen_ssa/interfaces/intrinsic.rs index e0a5e6dab19aa..3be14cf34b8f4 100644 --- a/src/librustc_codegen_ssa/interfaces/intrinsic.rs +++ b/src/librustc_codegen_ssa/interfaces/intrinsic.rs @@ -12,11 +12,14 @@ use super::Backend; use super::builder::HasCodegen; use mir::operand::OperandRef; use rustc::ty::Ty; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use rustc_target::abi::call::FnType; use syntax_pos::Span; -pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { - +pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + 
LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_codegen_llvm/context.rs diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs index 61cad6d0b44ba..cb1f292a1e1bb 100644 --- a/src/librustc_codegen_ssa/interfaces/mod.rs +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -37,6 +37,9 @@ mod debuginfo; mod abi; mod asm; +use rustc::ty::Ty; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; + pub use self::backend::{Backend, ExtraBackendMethods}; pub use self::misc::MiscMethods; pub use self::statics::StaticMethods; @@ -53,8 +56,10 @@ pub use self::asm::{AsmMethods, AsmBuilderMethods}; pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} -pub trait CodegenMethods<'ll, 'tcx: 'll> : - Backend<'ll> + TypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + +pub trait CodegenMethods<'a, 'll: 'a, 'tcx: 'll> : + Backend<'ll> + TypeMethods<'a, 'll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + - PreDefineMethods<'ll, 'tcx> {} + PreDefineMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{} diff --git a/src/librustc_codegen_ssa/interfaces/type_.rs b/src/librustc_codegen_ssa/interfaces/type_.rs index 6e3e9f3c553ca..9f29c74469f4e 100644 --- a/src/librustc_codegen_ssa/interfaces/type_.rs +++ b/src/librustc_codegen_ssa/interfaces/type_.rs @@ -10,13 +10,14 @@ use super::Backend; use super::builder::HasCodegen; -use common::TypeKind; +use super::misc::MiscMethods; +use common::{self, TypeKind}; use syntax::ast; use rustc::ty::layout::{self, Align, Size}; use std::cell::RefCell; use rustc::util::nodemap::FxHashMap; -use 
rustc::ty::{Ty, TyCtxt}; -use rustc::ty::layout::TyLayout; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{TyLayout, LayoutOf, HasTyCtxt}; use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; use mir::place::PlaceRef; @@ -33,6 +34,7 @@ pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { // Creates an integer type with the given number of bits, e.g. i24 fn type_ix(&self, num_bits: u64) -> Self::Type; + fn type_isize(&self) -> Self::Type; fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; @@ -63,42 +65,124 @@ pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { fn tcx(&self) -> &TyCtxt<'ll, 'tcx, 'tcx>; } -pub trait DerivedTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { - fn type_bool(&self) -> Self::Type; - fn type_char(&self) -> Self::Type; - fn type_i8p(&self) -> Self::Type; - fn type_isize(&self) -> Self::Type; - fn type_int(&self) -> Self::Type; +pub trait DerivedTypeMethods<'a, 'll: 'a, 'tcx: 'll> : + Backend<'ll> + BaseTypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + + fn type_bool(&self) -> Self::Type { + self.type_i8() + } + + fn type_char(&self) -> Self::Type { + self.type_i32() + } + + fn type_i8p(&self) -> Self::Type { + self.type_ptr_to(self.type_i8()) + } + + fn type_int(&self) -> Self::Type { + match &self.sess().target.target.target_c_int_width[..] 
{ + "16" => self.type_i16(), + "32" => self.type_i32(), + "64" => self.type_i64(), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + fn type_int_from_ty( &self, t: ast::IntTy - ) -> Self::Type; + ) -> Self::Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + fn type_uint_from_ty( &self, t: ast::UintTy - ) -> Self::Type; + ) -> Self::Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + fn type_float_from_ty( &self, t: ast::FloatTy - ) -> Self::Type; - fn type_from_integer(&self, i: layout::Integer) -> Self::Type; + ) -> Self::Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + fn type_from_integer(&self, i: layout::Integer) -> Self::Type { + use rustc::ty::layout::Integer::*; + match i { + I8 => self.type_i8(), + I16 => self.type_i16(), + I32 => self.type_i32(), + I64 => self.type_i64(), + I128 => self.type_i128(), + } + } + + fn type_pointee_for_abi_align(&'a self, align: Align) -> Self::Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_abi_align(self, align); + self.type_from_integer(ity) + } - /// Return a LLVM type that has at most the required alignment, - /// as a conservative approximation for unknown pointee types. - fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type; - - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. 
fn type_padding_filler( - &self, + &'a self, size: Size, align: Align - ) -> Self::Type; - - fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool; - fn type_is_sized(&self, ty: Ty<'tcx>) -> bool; - fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool; - fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool; + ) -> Self::Type { + let unit = layout::Integer::approximate_abi_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + common::type_needs_drop(*self.tcx(), ty) + } + + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(*self.tcx(), ty) + } + + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(*self.tcx(), ty) + } + + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx().struct_tail(ty); + match tail.sty { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, + _ => bug!("unexpected unsized tail: {:?}", tail.sty), + } + } } pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { @@ -118,7 +202,10 @@ pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { ) -> Self::Type; } -pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { +pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fn store_fn_arg( &mut self, ty: &ArgType<'tcx, Ty<'tcx>>, @@ -133,5 +220,7 @@ pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> { fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> >::Type; } -pub trait TypeMethods<'ll, 'tcx: 'll> : - BaseTypeMethods<'ll, 'tcx> + DerivedTypeMethods<'ll, 'tcx> + LayoutTypeMethods<'ll, 'tcx> {} +pub trait TypeMethods<'a, 'll: 'a, 'tcx: 'll> : + BaseTypeMethods<'ll, 'tcx> + DerivedTypeMethods<'a, 'll, 'tcx> + LayoutTypeMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{} diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index f2ae6e421fa6c..ae5288578e443 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -34,7 +34,10 @@ impl<'a, 'tcx> VirtualIndex { bx: &mut Bx, llvtable: >::Value, fn_ty: &FnType<'tcx, Ty<'tcx>> - ) -> >::Value { + ) -> >::Value + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); @@ -55,7 +58,10 @@ impl<'a, 'tcx> VirtualIndex { self, bx: &mut Bx, llvtable: >::Value - ) -> >::Value { + ) -> >::Value + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); @@ -77,7 +83,7 @@ impl<'a, 'tcx> VirtualIndex { /// The `trait_ref` encodes the erased self type. 
Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable<'a, 'll: 'a, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>>( +pub fn get_vtable<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( cx: &'a Cx, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, diff --git a/src/librustc_codegen_ssa/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs index 850b2c24af851..2330cbd54928f 100644 --- a/src/librustc_codegen_ssa/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -22,7 +22,7 @@ use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use super::FunctionCx; use interfaces::*; -pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>>( +pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( fx: &FunctionCx<'a, 'f, 'll, 'tcx, Cx> ) -> BitSet where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> @@ -57,8 +57,10 @@ pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<' struct LocalAnalyzer< 'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, - Cx: 'a + CodegenMethods<'ll, 'tcx> - > { + Cx: 'a + CodegenMethods<'a, 'll, 'tcx> + > + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>, dominators: Dominators, non_ssa_locals: BitSet, @@ -67,7 +69,7 @@ struct LocalAnalyzer< first_assignment: IndexVec } -impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> +impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn new(fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>) -> Self { @@ -111,7 +113,7 @@ impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, } } -impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> 
Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 8e70f2378351b..5c4f0ed111ac8 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -31,7 +31,7 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { @@ -242,7 +242,10 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> } PassMode::Direct(_) | PassMode::Pair(..) => { - let op = self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE)); + let op = self.codegen_consume( + &mut bx, + &mir::Place::Local(mir::RETURN_PLACE) + ); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { @@ -264,7 +267,11 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> }; let llslot = match op.val { Immediate(_) | Pair(..) 
=> { - let scratch = PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + let scratch = PlaceRef::alloca( + &mut bx, + self.fn_ty.ret.layout, + "ret" + ); op.val.store(&mut bx, scratch); scratch.llval } diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs index 6c9359716251d..98fdc53ef0400 100644 --- a/src/librustc_codegen_ssa/mir/constant.rs +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -21,60 +21,7 @@ use interfaces::*; use super::FunctionCx; -pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocation) -> &'ll Value { - let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); - let layout = cx.data_layout(); - let pointer_size = layout.pointer_size.bytes() as usize; - - let mut next_offset = 0; - for &(offset, ((), alloc_id)) in alloc.relocations.iter() { - let offset = offset.bytes(); - assert_eq!(offset as usize as u64, offset); - let offset = offset as usize; - if offset > next_offset { - llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset])); - } - let ptr_offset = read_target_uint( - layout.endian, - &alloc.bytes[offset..(offset + pointer_size)], - ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(cx.scalar_to_backend( - Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), - &layout::Scalar { - value: layout::Primitive::Pointer, - valid_range: 0..=!0 - }, - cx.type_i8p() - )); - next_offset = offset + pointer_size; - } - if alloc.bytes.len() >= next_offset { - llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..])); - } - - cx.const_struct(&llvals, true) -} - -pub fn codegen_static_initializer( - cx: &CodegenCx<'ll, 'tcx, &'ll Value>, - def_id: DefId, -) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { - let instance = ty::Instance::mono(cx.tcx, def_id); - let cid = GlobalId { - instance, - promoted: None, - }; - let param_env = ty::ParamEnv::reveal_all(); - let static_ = cx.tcx.const_eval(param_env.and(cid))?; - - 
let alloc = match static_.val { - ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, - _ => bug!("static const eval returned {:#?}", static_), - }; - Ok((const_alloc_to_llvm(cx, alloc), alloc)) -} - -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index a24079467496a..fcca370e0a31a 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -36,7 +36,9 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> { +pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, @@ -100,8 +102,9 @@ pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods< param_substs: &'tcx Substs<'tcx>, } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> @@ -114,14 +117,17 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { pub fn set_debug_loc>( &mut self, bx: &mut 
Bx, source_info: mir::SourceInfo - ) where Bx::CodegenCx : DebugInfoMethods<'ll, 'tcx, DIScope = Cx::DIScope> { + ) where Bx::CodegenCx : DebugInfoMethods<'ll, 'tcx, DIScope = Cx::DIScope>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let (scope, span) = self.debug_loc(source_info); bx.set_source_location(&self.debug_context, scope, span); } @@ -193,11 +199,13 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl<'ll, 'tcx: 'll, V : 'll + CodegenObject> LocalRef<'tcx, V> { - fn new_operand>( - cx: &Cx, +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + cx: &'a Cx, layout: TyLayout<'tcx> - ) -> LocalRef<'tcx, V> where Cx: Backend<'ll, Value=V> { + ) -> LocalRef<'tcx, V> where Cx: Backend<'ll, Value=V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -317,7 +325,11 @@ pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( debug!("alloc: {:?} -> place", local); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&mut bx, layout, &format!("{:?}", local)); + PlaceRef::alloca_unsized_indirect( + &mut bx, + layout, + &format!("{:?}", local) + ); LocalRef::UnsizedPlace(indirect_place) } else { LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) @@ -376,6 +388,7 @@ fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( block_bxs: &IndexVec>::BasicBlock>) -> (IndexVec>::BasicBlock>>, IndexVec>::Value>>>) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 2ba6b318b58d7..ab37e149933e2 100644 --- 
a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -65,10 +65,12 @@ impl fmt::Debug for OperandRef<'tcx, V> { } impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst>( - cx: &Cx, + pub fn new_zst>( + cx: &'a Cx, layout: TyLayout<'tcx> - ) -> OperandRef<'tcx, V> where Cx : Backend<'ll, Value = V> { + ) -> OperandRef<'tcx, V> where Cx : Backend<'ll, Value = V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { assert!(layout.is_zst()); OperandRef { val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(&layout))), @@ -143,7 +145,7 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { } } - pub fn deref>( + pub fn deref>( self, cx: &'a Cx ) -> PlaceRef<'tcx, V> where @@ -171,7 +173,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { pub fn immediate_or_packed_pair>( self, bx: &mut Bx - ) -> V where Bx::CodegenCx : Backend<'ll, Value=V> { + ) -> V where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if let OperandValue::Pair(a, b) = self.val { let llty = bx.cx().backend_type(&self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", @@ -194,7 +198,8 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { llval: >::Value, layout: TyLayout<'tcx> ) -> OperandRef<'tcx, >::Value> - where Bx::CodegenCx : Backend<'ll, Value=V> + where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", @@ -281,7 +286,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { self, bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> - ) where Bx::CodegenCx : Backend<'ll, Value = V> { + ) where 
Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::empty()); } @@ -289,7 +296,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { self, bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> - ) where Bx::CodegenCx : Backend<'ll, Value = V> { + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } @@ -297,7 +306,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { self, bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> - ) where Bx::CodegenCx : Backend<'ll, Value = V> { + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } @@ -305,7 +316,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { self, bx: &mut Bx, dest: PlaceRef<'tcx, >::Value> - ) where Bx::CodegenCx : Backend<'ll, Value = V> { + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } @@ -314,7 +327,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { bx: &mut Bx, dest: PlaceRef<'tcx, >::Value>, flags: MemFlags, - ) where Bx::CodegenCx : Backend<'ll, Value = V> { + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. 
@@ -379,8 +394,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { fn maybe_codegen_consume_direct>( &mut self, diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 6fa1a2a6e28cb..c03be9c63b544 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -54,7 +54,9 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, layout: TyLayout<'tcx>, name: &str - ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V> { + ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); let tmp = bx.alloca(bx.cx().backend_type(&layout), name, layout.align); @@ -77,10 +79,12 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { Self::alloca(bx, ptr_layout, name) } - pub fn len>( + pub fn len>( &self, cx: &Cx - ) -> V where Cx : Backend<'ll, Value=V> { + ) -> V where Cx : Backend<'ll, Value=V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if let layout::FieldPlacement::Array { count, .. 
} = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); @@ -386,19 +390,21 @@ impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn storage_live>(&self, bx: &mut Bx) - where Bx::CodegenCx : Backend<'ll, Value = V> + where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { bx.lifetime_start(self.llval, self.layout.size); } pub fn storage_dead>(&self, bx: &mut Bx) - where Bx::CodegenCx : Backend<'ll, Value = V> + where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { bx.lifetime_end(self.llval, self.layout.size); } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index da28249f1ad73..7a97bf20c02a1 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -27,7 +27,7 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { @@ -333,7 +333,10 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. 
- let ll_t_in_const = bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + let ll_t_in_const = bx.cx().const_uint_big( + ll_t_in, + *scalar.valid_range.end() + ); let cmp = bx.icmp( IntPredicate::IntULE, llval, @@ -721,7 +724,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> } } -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { @@ -758,7 +761,9 @@ fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 't oop: OverflowOp, bx: &mut Bx, ty: Ty -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -829,7 +834,9 @@ fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( x: >::Value, int_ty: >::Type, float_ty: >::Type -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. 
@@ -864,7 +871,9 @@ fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( x: >::Value, float_ty: >::Type, int_ty: >::Type -) -> >::Value { +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs index 76e0595ec0aed..f017eedc5248d 100644 --- a/src/librustc_codegen_ssa/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -18,7 +18,7 @@ use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; use interfaces::*; use value::Value; -impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: CodegenMethods<'ll, 'tcx>> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { From b2c66a223d8465b91eef21c43487530c060e61b1 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Wed, 10 Oct 2018 12:17:43 +0200 Subject: [PATCH 74/76] Traits skeletton fully in place --- src/librustc_codegen_ssa/interfaces/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs index 1b2cea7775d6a..6061a3729b473 100644 --- a/src/librustc_codegen_ssa/interfaces/builder.rs +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -254,7 +254,7 @@ pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + ) -> >::Value; fn load_ref( &mut self, - &PlaceRef<'tcx,>::Value> + ptr: &PlaceRef<'tcx,>::Value> ) -> OperandRef<'tcx, >::Value>; fn range_metadata( From 6d63072300cb8187e72c43114e47e1d67401ab34 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Tue, 23 Oct 2018 17:01:35 +0200 Subject: [PATCH 75/76] Separating the back folder between backend-agnostic and LLVM-specific code --- src/Cargo.lock | 14 + 
src/librustc_codegen_llvm/back/archive.rs | 24 +- src/librustc_codegen_llvm/back/link.rs | 225 +- src/librustc_codegen_llvm/back/lto.rs | 426 ++-- src/librustc_codegen_llvm/back/write.rs | 1862 +---------------- src/librustc_codegen_llvm/base.rs | 8 +- src/librustc_codegen_llvm/consts.rs | 4 +- src/librustc_codegen_llvm/context.rs | 8 +- src/librustc_codegen_llvm/debuginfo/mod.rs | 2 +- src/librustc_codegen_llvm/diagnostics.rs | 35 +- src/librustc_codegen_llvm/intrinsic.rs | 17 +- src/librustc_codegen_llvm/lib.rs | 169 +- src/librustc_codegen_ssa/Cargo.toml | 9 + src/librustc_codegen_ssa/back/archive.rs | 36 + .../back/command.rs | 0 src/librustc_codegen_ssa/back/link.rs | 213 ++ .../back/linker.rs | 592 +++--- src/librustc_codegen_ssa/back/lto.rs | 124 ++ src/librustc_codegen_ssa/back/mod.rs | 17 + .../back/symbol_export.rs | 2 +- src/librustc_codegen_ssa/back/write.rs | 1826 ++++++++++++++++ src/librustc_codegen_ssa/base.rs | 50 +- src/librustc_codegen_ssa/diagnostics.rs | 11 + .../interfaces/backend.rs | 47 +- src/librustc_codegen_ssa/interfaces/misc.rs | 5 +- src/librustc_codegen_ssa/interfaces/mod.rs | 2 + src/librustc_codegen_ssa/interfaces/write.rs | 78 + src/librustc_codegen_ssa/lib.rs | 25 + src/librustc_codegen_ssa/meth.rs | 6 +- src/librustc_codegen_ssa/mir/statement.rs | 5 +- 30 files changed, 3079 insertions(+), 2763 deletions(-) create mode 100644 src/librustc_codegen_ssa/back/archive.rs rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/back/command.rs (100%) create mode 100644 src/librustc_codegen_ssa/back/link.rs rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/back/linker.rs (96%) create mode 100644 src/librustc_codegen_ssa/back/lto.rs create mode 100644 src/librustc_codegen_ssa/back/mod.rs rename src/{librustc_codegen_llvm => librustc_codegen_ssa}/back/symbol_export.rs (99%) create mode 100644 src/librustc_codegen_ssa/back/write.rs create mode 100644 src/librustc_codegen_ssa/interfaces/write.rs diff --git a/src/Cargo.lock 
b/src/Cargo.lock index a33c726f36053..a704bd4faf640 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2132,10 +2132,24 @@ dependencies = [ name = "rustc_codegen_ssa" version = "0.0.0" dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_allocator 0.0.0", + "rustc_apfloat 0.0.0", + "rustc_codegen_utils 0.0.0", "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_fs_util 0.0.0", + "rustc_incremental 0.0.0", "rustc_mir 0.0.0", "rustc_target 0.0.0", + "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs index af9efc6d7c417..c51f683b6d8ae 100644 --- a/src/librustc_codegen_llvm/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -18,6 +18,7 @@ use std::ptr; use std::str; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use rustc_codegen_ssa::back::archive::find_library; use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; @@ -52,29 +53,6 @@ enum Addition { }, } -pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) - -> PathBuf { - // On Windows, static libraries sometimes show up as libfoo.a and other - // times show up as foo.lib - let oslibname = format!("{}{}{}", - sess.target.target.options.staticlib_prefix, - name, - 
sess.target.target.options.staticlib_suffix); - let unixlibname = format!("lib{}.a", name); - - for path in search_paths { - debug!("looking for {} inside {:?}", name, path); - let test = path.join(&oslibname); - if test.exists() { return test } - if oslibname != unixlibname { - let test = path.join(&unixlibname); - if test.exists() { return test } - } - } - sess.fatal(&format!("could not find native static library `{}`, \ - perhaps an -L flag is missing?", name)); -} - fn is_relevant_child(c: &Child) -> bool { match c.name() { Some(name) => !name.contains("SYMDEF"), diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 2e84d223a8b94..6b92eb025bf50 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -9,11 +9,12 @@ // except according to those terms. use back::wasm; -use cc::windows_registry; use super::archive::{ArchiveBuilder, ArchiveConfig}; use super::bytecode::RLIB_BYTECODE_EXTENSION; -use super::linker::Linker; -use super::command::Command; +use rustc_codegen_ssa::back::linker::Linker; +use rustc_codegen_ssa::back::link::{remove, ignored_for_lto, each_linked_rlib, linker_and_flavor, + get_linker}; +use rustc_codegen_ssa::back::command::Command; use super::rpath::RPathConfig; use super::rpath; use metadata::METADATA_FILENAME; @@ -22,10 +23,9 @@ use rustc::session::config::{RUST_CGU_EXT, Lto}; use rustc::session::filesearch; use rustc::session::search_paths::PathKind; use rustc::session::Session; -use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; +use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use rustc_codegen_ssa::CrateInfo; -use CodegenResults; +use rustc_codegen_ssa::CodegenResults; use rustc::util::common::time; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; @@ -34,6 +34,7 @@ use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor}; 
use rustc_data_structures::fx::FxHashSet; use context::get_reloc_model; use llvm; +use LlvmCodegenBackend; use std::ascii; use std::char; @@ -51,77 +52,11 @@ pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default invalid_output_for_target, out_filename, check_file_is_writeable, filename_for_metadata}; -// The third parameter is for env vars, used on windows to set up the -// path for MSVC to find its DLLs, and gcc to find its bundled -// toolchain -pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { - let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); - - // If our linker looks like a batch script on Windows then to execute this - // we'll need to spawn `cmd` explicitly. This is primarily done to handle - // emscripten where the linker is `emcc.bat` and needs to be spawned as - // `cmd /c emcc.bat ...`. - // - // This worked historically but is needed manually since #42436 (regression - // was tagged as #42791) and some more info can be found on #44443 for - // emscripten itself. - let mut cmd = match linker.to_str() { - Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), - _ => match flavor { - LinkerFlavor::Lld(f) => Command::lld(linker, f), - LinkerFlavor::Msvc - if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => - { - Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) - }, - _ => Command::new(linker), - } - }; - - // The compiler's sysroot often has some bundled tools, so add it to the - // PATH for the child. 
- let mut new_path = sess.host_filesearch(PathKind::All) - .get_tools_search_paths(); - let mut msvc_changed_path = false; - if sess.target.target.options.is_like_msvc { - if let Some(ref tool) = msvc_tool { - cmd.args(tool.args()); - for &(ref k, ref v) in tool.env() { - if k == "PATH" { - new_path.extend(env::split_paths(v)); - msvc_changed_path = true; - } else { - cmd.env(k, v); - } - } - } - } - - if !msvc_changed_path { - if let Some(path) = env::var_os("PATH") { - new_path.extend(env::split_paths(&path)); - } - } - cmd.env("PATH", env::join_paths(new_path).unwrap()); - - (linker.to_path_buf(), cmd) -} - -pub fn remove(sess: &Session, path: &Path) { - match fs::remove_file(path) { - Ok(..) => {} - Err(e) => { - sess.err(&format!("failed to remove {}: {}", - path.display(), - e)); - } - } -} /// Perform the linkage portion of the compilation phase. This will generate all /// of the requested outputs for this compilation session. pub(crate) fn link_binary(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, outputs: &OutputFilenames, crate_name: &str) -> Vec { let mut out_filenames = Vec::new(); @@ -220,62 +155,8 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { false } -pub(crate) fn each_linked_rlib(sess: &Session, - info: &CrateInfo, - f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { - let crates = info.used_crates_static.iter(); - let fmts = sess.dependency_formats.borrow(); - let fmts = fmts.get(&config::CrateType::Executable) - .or_else(|| fmts.get(&config::CrateType::Staticlib)) - .or_else(|| fmts.get(&config::CrateType::Cdylib)) - .or_else(|| fmts.get(&config::CrateType::ProcMacro)); - let fmts = match fmts { - Some(f) => f, - None => return Err("could not find formats for rlibs".to_string()) - }; - for &(cnum, ref path) in crates { - match fmts.get(cnum.as_usize() - 1) { - Some(&Linkage::NotLinked) | - Some(&Linkage::IncludedFromDylib) => continue, - Some(_) => {} - None => return 
Err("could not find formats for rlibs".to_string()) - } - let name = &info.crate_name[&cnum]; - let path = match *path { - LibSource::Some(ref p) => p, - LibSource::MetadataOnly => { - return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", - name)) - } - LibSource::None => { - return Err(format!("could not find rlib for: `{}`", name)) - } - }; - f(cnum, &path); - } - Ok(()) -} - -/// Returns a boolean indicating whether the specified crate should be ignored -/// during LTO. -/// -/// Crates ignored during LTO are not lumped together in the "massive object -/// file" that we create and are linked in their normal rlib states. See -/// comments below for what crates do not participate in LTO. -/// -/// It's unusual for a crate to not participate in LTO. Typically only -/// compiler-specific and unstable crates have a reason to not participate in -/// LTO. -pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { - // If our target enables builtin function lowering in LLVM then the - // crates providing these functions don't participate in LTO (e.g. - // no_builtins or compiler builtins crates). - !sess.target.target.options.no_builtins && - (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) -} - fn link_binary_output(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType, outputs: &OutputFilenames, crate_name: &str) -> Vec { @@ -362,8 +243,11 @@ fn archive_config<'a>(sess: &'a Session, /// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a /// directory being searched for `extern crate` (observing an incomplete file). /// The returned path is the temporary file containing the complete metadata. 
-fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir) - -> PathBuf { +fn emit_metadata<'a>( + sess: &'a Session, + codegen_results: &CodegenResults, + tmpdir: &TempDir +) -> PathBuf { let out_filename = tmpdir.path().join(METADATA_FILENAME); let result = fs::write(&out_filename, &codegen_results.metadata.raw_data); @@ -386,7 +270,7 @@ enum RlibFlavor { // all of the object files from native libraries. This is done by unzipping // native libraries and inserting all of the contents into this archive. fn link_rlib<'a>(sess: &'a Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, flavor: RlibFlavor, out_filename: &Path, tmpdir: &TempDir) -> ArchiveBuilder<'a> { @@ -500,7 +384,7 @@ fn link_rlib<'a>(sess: &'a Session, // link in the metadata object file (and also don't prepare the archive with a // metadata file). fn link_staticlib(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, out_filename: &Path, tempdir: &TempDir) { let mut ab = link_rlib(sess, @@ -585,69 +469,6 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { } } -pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { - fn infer_from( - sess: &Session, - linker: Option, - flavor: Option, - ) -> Option<(PathBuf, LinkerFlavor)> { - match (linker, flavor) { - (Some(linker), Some(flavor)) => Some((linker, flavor)), - // only the linker flavor is known; use the default linker for the selected flavor - (None, Some(flavor)) => Some((PathBuf::from(match flavor { - LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, - LinkerFlavor::Gcc => "cc", - LinkerFlavor::Ld => "ld", - LinkerFlavor::Msvc => "link.exe", - LinkerFlavor::Lld(_) => "lld", - }), flavor)), - (Some(linker), None) => { - let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker"); - }).to_owned(); - - let 
flavor = if stem == "emcc" { - LinkerFlavor::Em - } else if stem == "gcc" || stem.ends_with("-gcc") { - LinkerFlavor::Gcc - } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { - LinkerFlavor::Ld - } else if stem == "link" || stem == "lld-link" { - LinkerFlavor::Msvc - } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.target.options.lld_flavor) - } else { - // fall back to the value in the target spec - sess.target.target.linker_flavor - }; - - Some((linker, flavor)) - }, - (None, None) => None, - } - } - - // linker and linker flavor specified via command line have precedence over what the target - // specification specifies - if let Some(ret) = infer_from( - sess, - sess.opts.cg.linker.clone(), - sess.opts.debugging_opts.linker_flavor, - ) { - return ret; - } - - if let Some(ret) = infer_from( - sess, - sess.target.target.options.linker.clone().map(PathBuf::from), - Some(sess.target.target.linker_flavor), - ) { - return ret; - } - - bug!("Not enough information provided to determine how to invoke the linker"); -} - // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. This @@ -655,7 +476,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { fn link_natively(sess: &Session, crate_type: config::CrateType, out_filename: &Path, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path) { info!("preparing {:?} to {:?}", crate_type, out_filename); let (linker, flavor) = linker_and_flavor(sess); @@ -1029,7 +850,7 @@ fn link_args(cmd: &mut dyn Linker, crate_type: config::CrateType, tmpdir: &Path, out_filename: &Path, - codegen_results: &CodegenResults) { + codegen_results: &CodegenResults) { // Linker plugins should be specified early in the list of arguments cmd.cross_lang_lto(); @@ -1241,7 +1062,7 @@ fn link_args(cmd: &mut dyn Linker, // may have their native library pulled in above. 
fn add_local_native_libraries(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults) { + codegen_results: &CodegenResults) { sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| { match k { PathKind::Framework => { cmd.framework_path(path); } @@ -1276,7 +1097,7 @@ fn add_local_native_libraries(cmd: &mut dyn Linker, // the intermediate rlib version) fn add_upstream_rust_crates(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType, tmpdir: &Path) { // All of the heavy lifting has previously been accomplished by the @@ -1400,7 +1221,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // linking it. fn link_sanitizer_runtime(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path, cnum: CrateNum) { let src = &codegen_results.crate_info.used_crate_source[&cnum]; @@ -1469,7 +1290,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // we're at the end of the dependency chain. fn add_static_crate(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path, crate_type: config::CrateType, cnum: CrateNum) { @@ -1609,7 +1430,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // also be resolved in the target crate. fn add_upstream_native_libraries(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType) { // Be sure to use a topological sorting of crates because there may be // interdependencies between native libraries. When passing -nodefaultlibs, diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 2328c03b37770..d1756127ff5aa 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -9,13 +9,15 @@ // except according to those terms. 
use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; -use back::symbol_export; -use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; -use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::symbol_export; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; +use rustc_codegen_ssa::interfaces::*; +use back::write::{self, with_llvm_pmb, save_temp_bitcode, get_llvm_opt_level, + new_diagnostic_handlers}; use errors::{FatalError, Handler}; use llvm::archive_ro::ArchiveRO; use llvm::{self, True, False}; -use memmap; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::LOCAL_CRATE; @@ -24,7 +26,7 @@ use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; -use ModuleLlvm; +use {ModuleLlvm, LlvmCodegenBackend}; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; @@ -47,71 +49,16 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub(crate) enum LtoModuleCodegen { - Fat { - module: Option>, - _serialized_bitcode: Vec, - }, - - Thin(ThinModule), -} - -impl LtoModuleCodegen { - pub fn name(&self) -> &str { - match *self { - LtoModuleCodegen::Fat { .. } => "everything", - LtoModuleCodegen::Thin(ref m) => m.name(), - } - } - - /// Optimize this module within the given codegen context. - /// - /// This function is unsafe as it'll return a `ModuleCodegen` still - /// points to LLVM data structures owned by this `LtoModuleCodegen`. - /// It's intended that the module returned is immediately code generated and - /// dropped, and then this LTO module is dropped. 
- pub(crate) unsafe fn optimize(&mut self, - cgcx: &CodegenContext, - timeline: &mut Timeline) - -> Result, FatalError> - { - match *self { - LtoModuleCodegen::Fat { ref mut module, .. } => { - let module = module.take().unwrap(); - { - let config = cgcx.config(module.kind); - let llmod = module.module_llvm.llmod(); - let tm = &*module.module_llvm.tm; - run_pass_manager(cgcx, tm, llmod, config, false); - timeline.record("fat-done"); - } - Ok(module) - } - LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline), - } - } - - /// A "gauge" of how costly it is to optimize this module, used to sort - /// biggest modules first. - pub fn cost(&self) -> u64 { - match *self { - // Only one module with fat LTO, so the cost doesn't matter. - LtoModuleCodegen::Fat { .. } => 0, - LtoModuleCodegen::Thin(ref m) => m.cost(), - } - } -} - /// Performs LTO, which in the case of full LTO means merging all modules into /// a single one and returning it for further optimizing. For ThinLTO, it will /// do the global analysis necessary and return two lists, one of the modules /// the need optimization and another for modules that can simply be copied over /// from the incr. comp. cache. 
-pub(crate) fn run(cgcx: &CodegenContext, +pub(crate) fn run(cgcx: &CodegenContext, modules: Vec>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { let diag_handler = cgcx.create_diag_handler(); let export_threshold = match cgcx.lto { @@ -230,13 +177,13 @@ pub(crate) fn run(cgcx: &CodegenContext, } } -fn fat_lto(cgcx: &CodegenContext, +fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, mut modules: Vec>, - mut serialized_modules: Vec<(SerializedModule, CString)>, + mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result, FatalError> + -> Result>, FatalError> { info!("going for a fat lto"); @@ -272,7 +219,7 @@ fn fat_lto(cgcx: &CodegenContext, // The linking steps below may produce errors and diagnostics within LLVM // which we'd like to handle and print, so set up our diagnostic handlers // (which get unregistered when they go out of scope below). - let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let _handler = new_diagnostic_handlers(cgcx, diag_handler, llcx); // For all other modules we codegened we'll need to link them into our own // bitcode. 
All modules were codegened in their own LLVM context, however, @@ -303,7 +250,7 @@ fn fat_lto(cgcx: &CodegenContext, serialized_bitcode.push(bc_decoded); } drop(linker); - cgcx.save_temp_bitcode(&module, "lto.input"); + save_temp_bitcode(&cgcx, &module, "lto.input"); // Internalize everything that *isn't* in our whitelist to help strip out // more modules and such @@ -312,14 +259,14 @@ fn fat_lto(cgcx: &CodegenContext, llvm::LLVMRustRunRestrictionPass(llmod, ptr as *const *const libc::c_char, symbol_white_list.len() as libc::size_t); - cgcx.save_temp_bitcode(&module, "lto.after-restriction"); + save_temp_bitcode(&cgcx, &module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } - cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); + save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); } timeline.record("passes"); } @@ -386,14 +333,14 @@ impl Drop for Linker<'a> { /// calculating the *index* for ThinLTO. This index will then be shared amongst /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. -fn thin_lto(cgcx: &CodegenContext, +fn thin_lto(cgcx: &CodegenContext, diag_handler: &Handler, modules: Vec>, - serialized_modules: Vec<(SerializedModule, CString)>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, + serialized_modules: Vec<(SerializedModule, CString)>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { unsafe { info!("going for that thin, thin LTO"); @@ -556,9 +503,8 @@ fn thin_lto(cgcx: &CodegenContext, } } -fn run_pass_manager(cgcx: &CodegenContext, - tm: &llvm::TargetMachine, - llmod: &llvm::Module, +pub(crate) fn run_pass_manager(cgcx: &CodegenContext, + module: &ModuleCodegen, config: &ModuleConfig, thin: bool) { // Now we have one massive module inside of llmod. 
Time to run the @@ -569,7 +515,7 @@ fn run_pass_manager(cgcx: &CodegenContext, debug!("running the pass manager"); unsafe { let pm = llvm::LLVMCreatePassManager(); - llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); + llvm::LLVMRustAddAnalysisPasses(module.module_llvm.tm, pm, module.module_llvm.llmod()); if config.verify_llvm_ir { let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); @@ -588,12 +534,13 @@ fn run_pass_manager(cgcx: &CodegenContext, // Note that in general this shouldn't matter too much as you typically // only turn on ThinLTO when you're compiling with optimizations // otherwise. - let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let opt_level = match opt_level { llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, level => level, }; - with_llvm_pmb(llmod, config, opt_level, false, &mut |b| { + with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| { if thin { if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) { panic!("this version of LLVM does not support ThinLTO"); @@ -611,29 +558,13 @@ fn run_pass_manager(cgcx: &CodegenContext, } time_ext(cgcx.time_passes, None, "LTO passes", || - llvm::LLVMRunPassManager(pm, llmod)); + llvm::LLVMRunPassManager(pm, module.module_llvm.llmod())); llvm::LLVMDisposePassManager(pm); } debug!("lto done"); } -pub enum SerializedModule { - Local(ModuleBuffer), - FromRlib(Vec), - FromUncompressedFile(memmap::Mmap), -} - -impl SerializedModule { - fn data(&self) -> &[u8] { - match *self { - SerializedModule::Local(ref m) => m.data(), - SerializedModule::FromRlib(ref m) => m, - SerializedModule::FromUncompressedFile(ref m) => m, - } - } -} - pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); unsafe impl Send for ModuleBuffer {} @@ -645,8 +576,10 @@ impl ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } +} - pub fn 
data(&self) -> &[u8] { +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustModuleBufferPtr(self.0); let len = llvm::LLVMRustModuleBufferLen(self.0); @@ -661,19 +594,7 @@ impl Drop for ModuleBuffer { } } -pub struct ThinModule { - shared: Arc, - idx: usize, -} - -struct ThinShared { - data: ThinData, - thin_buffers: Vec, - serialized_modules: Vec, - module_names: Vec, -} - -struct ThinData(&'static mut llvm::ThinLTOData); +pub struct ThinData(&'static mut llvm::ThinLTOData); unsafe impl Send for ThinData {} unsafe impl Sync for ThinData {} @@ -698,8 +619,10 @@ impl ThinBuffer { ThinBuffer(buffer) } } +} - pub fn data(&self) -> &[u8] { +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; let len = llvm::LLVMRustThinLTOBufferLen(self.0); @@ -716,161 +639,142 @@ impl Drop for ThinBuffer { } } -impl ThinModule { - fn name(&self) -> &str { - self.shared.module_names[self.idx].to_str().unwrap() - } - - fn cost(&self) -> u64 { - // Yes, that's correct, we're using the size of the bytecode as an - // indicator for how costly this codegen unit is. - self.data().len() as u64 - } - - fn data(&self) -> &[u8] { - let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); - a.unwrap_or_else(|| { - let len = self.shared.thin_buffers.len(); - self.shared.serialized_modules[self.idx - len].data() - }) - } - - unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result, FatalError> - { - let diag_handler = cgcx.create_diag_handler(); - let tm = (cgcx.tm_factory)().map_err(|e| { - write::llvm_err(&diag_handler, e) - })?; - - // Right now the implementation we've got only works over serialized - // modules, so we create a fresh new LLVM context and parse the module - // into that context. 
One day, however, we may do this for upstream - // crates but for locally codegened modules we may be able to reuse - // that LLVM Context and Module. - let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( +pub unsafe fn optimize_thin_module( + thin_module: &mut ThinModule, + cgcx: &CodegenContext, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory)().map_err(|e| { + write::llvm_err(&diag_handler, e) + })?; + + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally codegened modules we may be able to reuse + // that LLVM Context and Module. + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + thin_module.data().as_ptr(), + thin_module.data().len(), + thin_module.shared.module_names[thin_module.idx].as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for thin LTO module".to_string(); + write::llvm_err(&diag_handler, msg) + })? as *const _; + let module = ModuleCodegen { + module_llvm: ModuleLlvm { + llmod_raw, llcx, - self.data().as_ptr(), - self.data().len(), - self.shared.module_names[self.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module".to_string(); - write::llvm_err(&diag_handler, msg) - })? as *const _; - let module = ModuleCodegen { - module_llvm: ModuleLlvm { - llmod_raw, - llcx, - tm, - }, - name: self.name().to_string(), - kind: ModuleKind::Regular, - }; - { - let llmod = module.module_llvm.llmod(); - cgcx.save_temp_bitcode(&module, "thin-lto-input"); - - // Before we do much else find the "main" `DICompileUnit` that we'll be - // using below. 
If we find more than one though then rustc has changed - // in a way we're not ready for, so generate an ICE by returning - // an error. - let mut cu1 = ptr::null_mut(); - let mut cu2 = ptr::null_mut(); - llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); - if !cu2.is_null() { - let msg = "multiple source DICompileUnits found".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } + tm, + }, + name: thin_module.name().to_string(), + kind: ModuleKind::Regular, + }; + { + let llmod = module.module_llvm.llmod(); + save_temp_bitcode(&cgcx, &module, "thin-lto-input"); + + // Before we do much else find the "main" `DICompileUnit` that we'll be + // using below. If we find more than one though then rustc has changed + // in a way we're not ready for, so generate an ICE by returning + // an error. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + if !cu2.is_null() { + let msg = "multiple source DICompileUnits found".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } - // Like with "fat" LTO, get some better optimizations if landing pads - // are disabled by removing all landing pads. - if cgcx.no_landing_pads { - llvm::LLVMRustMarkAllFunctionsNounwind(llmod); - cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind"); - timeline.record("nounwind"); - } + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } - // Up next comes the per-module local analyses that we do for Thin LTO. - // Each of these functions is basically copied from the LLVM - // implementation and then tailored to suit this implementation. 
Ideally - // each of these would be supported by upstream LLVM but that's perhaps - // a patch for another day! - // - // You can find some more comments about these functions in the LLVM - // bindings we've got (currently `PassWrapper.cpp`) - if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-rename"); - timeline.record("rename"); - if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve"); - timeline.record("resolve"); - if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize"); - timeline.record("internalize"); - if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-import"); - timeline.record("import"); - - // Ok now this is a bit unfortunate. This is also something you won't - // find upstream in LLVM's ThinLTO passes! This is a hack for now to - // work around bugs in LLVM. - // - // First discovered in #45511 it was found that as part of ThinLTO - // importing passes LLVM will import `DICompileUnit` metadata - // information across modules. This means that we'll be working with one - // LLVM module that has multiple `DICompileUnit` instances in it (a - // bunch of `llvm.dbg.cu` members). 
Unfortunately there's a number of - // bugs in LLVM's backend which generates invalid DWARF in a situation - // like this: - // - // https://bugs.llvm.org/show_bug.cgi?id=35212 - // https://bugs.llvm.org/show_bug.cgi?id=35562 - // - // While the first bug there is fixed the second ended up causing #46346 - // which was basically a resurgence of #45511 after LLVM's bug 35212 was - // fixed. - // - // This function below is a huge hack around this problem. The function - // below is defined in `PassWrapper.cpp` and will basically "merge" - // all `DICompileUnit` instances in a module. Basically it'll take all - // the objects, rewrite all pointers of `DISubprogram` to point to the - // first `DICompileUnit`, and then delete all the other units. - // - // This is probably mangling to the debug info slightly (but hopefully - // not too much) but for now at least gets LLVM to emit valid DWARF (or - // so it appears). Hopefully we can remove this once upstream bugs are - // fixed in LLVM. - llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); - cgcx.save_temp_bitcode(&module, "thin-lto-after-patch"); - timeline.record("patch"); - - // Alright now that we've done everything related to the ThinLTO - // analysis it's time to run some optimizations! Here we use the same - // `run_pass_manager` as the "fat" LTO above except that we tell it to - // populate a thin-specific pass manager, which presumably LLVM treats a - // little differently. - info!("running thin lto passes over {}", module.name); - let config = cgcx.config(module.kind); - run_pass_manager(cgcx, module.module_llvm.tm, llmod, config, true); - cgcx.save_temp_bitcode(&module, "thin-lto-after-pm"); - timeline.record("thin-done"); + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. 
Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! + // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) } + save_temp_bitcode(&cgcx, &module, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(&cgcx, &module, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(&cgcx, &module, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(&cgcx, &module, "thin-lto-after-import"); + timeline.record("import"); - Ok(module) + // Ok now this is a bit unfortunate. This is also something you won't + // find upstream in LLVM's ThinLTO passes! This is a hack for now to + // work around bugs in LLVM. + // + // First discovered in #45511 it was found that as part of ThinLTO + // importing passes LLVM will import `DICompileUnit` metadata + // information across modules. This means that we'll be working with one + // LLVM module that has multiple `DICompileUnit` instances in it (a + // bunch of `llvm.dbg.cu` members). 
Unfortunately there's a number of + // bugs in LLVM's backend which generates invalid DWARF in a situation + // like this: + // + // https://bugs.llvm.org/show_bug.cgi?id=35212 + // https://bugs.llvm.org/show_bug.cgi?id=35562 + // + // While the first bug there is fixed the second ended up causing #46346 + // which was basically a resurgence of #45511 after LLVM's bug 35212 was + // fixed. + // + // This function below is a huge hack around this problem. The function + // below is defined in `PassWrapper.cpp` and will basically "merge" + // all `DICompileUnit` instances in a module. Basically it'll take all + // the objects, rewrite all pointers of `DISubprogram` to point to the + // first `DICompileUnit`, and then delete all the other units. + // + // This is probably mangling to the debug info slightly (but hopefully + // not too much) but for now at least gets LLVM to emit valid DWARF (or + // so it appears). Hopefully we can remove this once upstream bugs are + // fixed in LLVM. + llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-patch"); + timeline.record("patch"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. 
+ info!("running thin lto passes over {}", module.name); + let config = cgcx.config(module.kind); + run_pass_manager(cgcx, &module, config, true); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-pm"); + timeline.record("thin-done"); } + Ok(module) } #[derive(Debug, Default)] diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index c84f79549a488..d7415a82681ed 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -10,57 +10,36 @@ use attributes; use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; -use back::lto::{self, ModuleBuffer, ThinBuffer, SerializedModule}; -use back::link::{self, get_linker, remove}; -use back::command::Command; -use back::linker::LinkerInfo; -use back::symbol_export::ExportedSymbols; +use back::lto::{ModuleBuffer, ThinBuffer}; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler, + DiagnosticHandlers}; +use rustc_codegen_ssa::interfaces::*; use base; use consts; -use memmap; -use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, - in_incr_comp_dir, in_incr_comp_dir_sess}; -use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; -use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; -use rustc::middle::cstore::EncodedMetadata; -use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::config::{self, OutputType, Passes, Lto}; use rustc::session::Session; -use rustc::util::nodemap::FxHashMap; -use time_graph::{self, TimeGraph, Timeline}; +use time_graph::Timeline; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleLlvm}; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo}; -use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; -use rustc::ty::TyCtxt; -use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; 
+use ModuleLlvm; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; +use rustc::util::common::time_ext; use rustc_fs_util::{path2cstr, link_or_copy}; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::svh::Svh; -use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; -use errors::emitter::{Emitter}; -use syntax::attr; -use syntax::ext::hygiene::Mark; -use syntax_pos::MultiSpan; -use syntax_pos::symbol::Symbol; +use errors::{self, Handler, FatalError}; use type_::Type; use context::{is_pie_binary, get_reloc_model}; use common; -use jobserver::{Client, Acquired}; +use LlvmCodegenBackend; use rustc_demangle; -use std::any::Any; use std::ffi::{CString, CStr}; use std::fs; use std::io::{self, Write}; -use std::mem; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::str; use std::sync::Arc; -use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; -use std::time::Instant; -use std::thread; use libc::{c_uint, c_void, c_char, size_t}; pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [ @@ -87,8 +66,6 @@ pub const TLS_MODEL_ARGS : [(&'static str, llvm::ThreadLocalMode); 4] = [ ("local-exec", llvm::ThreadLocalMode::LocalExec), ]; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; - pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError { match llvm::last_error() { Some(err) => handler.fatal(&format!("{}: {}", msg, err)), @@ -116,7 +93,7 @@ pub fn write_output_file( } } -fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { +pub(crate) fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { match optimize { config::OptLevel::No => llvm::CodeGenOptLevel::None, config::OptLevel::Less => llvm::CodeGenOptLevel::Less, @@ -126,7 +103,7 @@ fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { } } -fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { +pub(crate) fn 
get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { match optimize { config::OptLevel::Size => llvm::CodeGenOptSizeDefault, config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, @@ -224,230 +201,45 @@ pub fn target_machine_factory(sess: &Session, find_features: bool) }) } -/// Module-specific configuration for `optimize_and_codegen`. -pub struct ModuleConfig { - /// Names of additional optimization passes to run. - passes: Vec, - /// Some(level) to optimize at a certain level, or None to run - /// absolutely no optimizations (used for the metadata module). - pub opt_level: Option, - - /// Some(level) to optimize binary size, or None to not affect program size. - opt_size: Option, - - pgo_gen: Option, - pgo_use: String, - - // Flags indicating which outputs to produce. - pub emit_pre_thin_lto_bc: bool, - emit_no_opt_bc: bool, - emit_bc: bool, - emit_bc_compressed: bool, - emit_lto_bc: bool, - emit_ir: bool, - emit_asm: bool, - emit_obj: bool, - // Miscellaneous flags. These are mostly copied from command-line - // options. - pub verify_llvm_ir: bool, - no_prepopulate_passes: bool, - no_builtins: bool, - time_passes: bool, - vectorize_loop: bool, - vectorize_slp: bool, - merge_functions: bool, - inline_threshold: Option, - // Instead of creating an object file by doing LLVM codegen, just - // make the object file bitcode. Provides easy compatibility with - // emscripten's ecc compiler, when used as the linker. 
- obj_is_bitcode: bool, - no_integrated_as: bool, - embed_bitcode: bool, - embed_bitcode_marker: bool, -} - -impl ModuleConfig { - fn new(passes: Vec) -> ModuleConfig { - ModuleConfig { - passes, - opt_level: None, - opt_size: None, - - pgo_gen: None, - pgo_use: String::new(), - - emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, - emit_bc: false, - emit_bc_compressed: false, - emit_lto_bc: false, - emit_ir: false, - emit_asm: false, - emit_obj: false, - obj_is_bitcode: false, - embed_bitcode: false, - embed_bitcode_marker: false, - no_integrated_as: false, - - verify_llvm_ir: false, - no_prepopulate_passes: false, - no_builtins: false, - time_passes: false, - vectorize_loop: false, - vectorize_slp: false, - merge_functions: false, - inline_threshold: None - } - } - - fn set_flags(&mut self, sess: &Session, no_builtins: bool) { - self.verify_llvm_ir = sess.verify_llvm_ir(); - self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = no_builtins || sess.target.target.options.no_builtins; - self.time_passes = sess.time_passes(); - self.inline_threshold = sess.opts.cg.inline_threshold; - self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || - sess.opts.debugging_opts.cross_lang_lto.enabled(); - let embed_bitcode = sess.target.target.options.embed_bitcode || - sess.opts.debugging_opts.embed_bitcode; - if embed_bitcode { - match sess.opts.optimize { - config::OptLevel::No | - config::OptLevel::Less => { - self.embed_bitcode_marker = embed_bitcode; - } - _ => self.embed_bitcode = embed_bitcode, - } - } - - // Copy what clang does by turning on loop vectorization at O2 and - // slp vectorization at O3. Otherwise configure other optimization aspects - // of this pass manager builder. - // Turn off vectorization for emscripten, as it's not very well supported. 
- self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && - (sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive) && - !sess.target.target.options.is_like_emscripten; - - self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && - sess.opts.optimize == config::OptLevel::Aggressive && - !sess.target.target.options.is_like_emscripten; - - self.merge_functions = sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive; - } -} - -/// Assembler name and command used by codegen when no_integrated_as is enabled -struct AssemblerCommand { - name: PathBuf, - cmd: Command, -} - -/// Additional resources used by optimize_and_codegen (not module specific) -#[derive(Clone)] -pub struct CodegenContext { - // Resources needed when running LTO - pub time_passes: bool, - pub lto: Lto, - pub no_landing_pads: bool, - pub save_temps: bool, - pub fewer_names: bool, - pub exported_symbols: Option>, - pub opts: Arc, - pub crate_types: Vec, - pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, - output_filenames: Arc, - regular_module_config: Arc, - metadata_module_config: Arc, - allocator_module_config: Arc, - pub tm_factory: Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync>, - pub msvc_imps_needed: bool, - pub target_pointer_width: String, - debuginfo: config::DebugInfo, - - // Number of cgus excluding the allocator/metadata modules - pub total_cgus: usize, - // Handler to use for diagnostics produced during codegen. - pub diag_emitter: SharedEmitter, - // LLVM passes added by plugins. - pub plugin_passes: Vec, - // LLVM optimizations for which we want to print remarks. - pub remark: Passes, - // Worker thread number - pub worker: usize, - // The incremental compilation session directory, or None if we are not - // compiling incrementally - pub incr_comp_session_dir: Option, - // Used to update CGU re-use information during the thinlto phase. 
- pub cgu_reuse_tracker: CguReuseTracker, - // Channel back to the main control thread to send messages to - coordinator_send: Sender>, - // A reference to the TimeGraph so we can register timings. None means that - // measuring is disabled. - time_graph: Option, - // The assembler command if no_integrated_as option is enabled, None otherwise - assembler_cmd: Option> -} - -impl CodegenContext { - pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) - } - - pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig { - match kind { - ModuleKind::Regular => &self.regular_module_config, - ModuleKind::Metadata => &self.metadata_module_config, - ModuleKind::Allocator => &self.allocator_module_config, - } +pub(crate) fn save_temp_bitcode( + cgcx: &CodegenContext, + module: &ModuleCodegen, + name: &str +) { + if !cgcx.save_temps { + return } - - pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { - if !self.save_temps { - return - } - unsafe { - let ext = format!("{}.bc", name); - let cgu = Some(&module.name[..]); - let path = self.output_filenames.temp_path_ext(&ext, cgu); - let cstr = path2cstr(&path); - let llmod = module.module_llvm.llmod(); - llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); - } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&module.name[..]); + let path = cgcx.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path2cstr(&path); + let llmod = module.module_llvm.llmod(); + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); } } -pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), - llcx: &'a llvm::Context, -} - -impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext, - handler: &'a Handler, - llcx: &'a llvm::Context) -> Self { - let data = Box::into_raw(Box::new((cgcx, handler))); - unsafe { - llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _); - 
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _); - } - DiagnosticHandlers { data, llcx } +pub fn new_diagnostic_handlers<'a>(cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a llvm::Context) -> DiagnosticHandlers<'a, LlvmCodegenBackend> { + let data = Box::into_raw(Box::new((cgcx, handler))); + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _); + llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _); } + DiagnosticHandlers { data, llcx: llcx } } -impl<'a> Drop for DiagnosticHandlers<'a> { - fn drop(&mut self) { - use std::ptr::null_mut; - unsafe { - llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut()); - llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut()); - drop(Box::from_raw(self.data)); - } +pub fn drop_diagnostic_handlers(diag: &mut DiagnosticHandlers<'a, LlvmCodegenBackend>) { + use std::ptr::null_mut; + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(diag.llcx, inline_asm_handler, null_mut()); + llvm::LLVMContextSetDiagnosticHandler(diag.llcx, diagnostic_handler, null_mut()); + drop(Box::from_raw(diag.data)); } } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); @@ -459,7 +251,7 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, if user.is_null() { return } - let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); @@ -471,7 +263,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void if user.is_null() { return } - let (cgcx, diag_handler) = *(user as 
*const (&CodegenContext, &Handler)); + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -508,7 +300,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void } // Unsafe due to LLVM calls. -unsafe fn optimize(cgcx: &CodegenContext, +pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig, @@ -518,7 +310,7 @@ unsafe fn optimize(cgcx: &CodegenContext, let llmod = module.module_llvm.llmod(); let llcx = &*module.module_llvm.llcx; let tm = &*module.module_llvm.tm; - let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let _handlers = new_diagnostic_handlers(cgcx, diag_handler, llcx); let module_name = module.name.clone(); let module_name = Some(&module_name[..]); @@ -569,7 +361,8 @@ unsafe fn optimize(cgcx: &CodegenContext, if !config.no_prepopulate_passes { llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); - let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; @@ -642,35 +435,8 @@ unsafe fn optimize(cgcx: &CodegenContext, Ok(()) } -fn generate_lto_work(cgcx: &CodegenContext, - modules: Vec>, - import_only_modules: Vec<(SerializedModule, WorkProduct)>) - -> Vec<(WorkItem, u64)> -{ - let mut timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - "generate lto") - }).unwrap_or(Timeline::noop()); - let (lto_modules, copy_jobs) = lto::run(cgcx, modules, import_only_modules, &mut 
timeline) - .unwrap_or_else(|e| e.raise()); - - let lto_modules = lto_modules.into_iter().map(|module| { - let cost = module.cost(); - (WorkItem::LTO(module), cost) - }); - - let copy_jobs = copy_jobs.into_iter().map(|wp| { - (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { - name: wp.cgu_name.clone(), - source: wp, - }), 0) - }); - - lto_modules.chain(copy_jobs).collect() -} -unsafe fn codegen(cgcx: &CodegenContext, +pub(crate) unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, module: ModuleCodegen, config: &ModuleConfig, @@ -684,7 +450,7 @@ unsafe fn codegen(cgcx: &CodegenContext, let tm = &*module.module_llvm.tm; let module_name = module.name.clone(); let module_name = Some(&module_name[..]); - let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let handlers = new_diagnostic_handlers(cgcx, diag_handler, llcx); if cgcx.msvc_imps_needed { create_msvc_imps(cgcx, llcx, llmod); @@ -884,7 +650,7 @@ unsafe fn codegen(cgcx: &CodegenContext, /// /// Basically all of this is us attempting to follow in the footsteps of clang /// on iOS. See #35968 for lots more info. 
-unsafe fn embed_bitcode(cgcx: &CodegenContext, +unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { @@ -924,1259 +690,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); } -pub(crate) struct CompiledModules { - pub modules: Vec, - pub metadata_module: CompiledModule, - pub allocator_module: Option, -} - -fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { - sess.crate_types.borrow().contains(&config::CrateType::Rlib) && - sess.opts.output_types.contains_key(&OutputType::Exe) -} - -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { - if sess.opts.incremental.is_none() { - return false - } - - match sess.lto() { - Lto::Fat | - Lto::No => false, - Lto::Thin | - Lto::ThinLocal => true, - } -} - -pub fn start_async_codegen(tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, - coordinator_receive: Receiver>, - total_cgus: usize) - -> OngoingCodegen { - let sess = tcx.sess; - let crate_name = tcx.crate_name(LOCAL_CRATE); - let crate_hash = tcx.crate_hash(LOCAL_CRATE); - let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); - let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); - - let linker_info = LinkerInfo::new(tcx); - let crate_info = CrateInfo::new(tcx); - - // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(vec![]); - let mut allocator_config = ModuleConfig::new(vec![]); - - if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { - match *sanitizer { - Sanitizer::Address => { - modules_config.passes.push("asan".to_owned()); - modules_config.passes.push("asan-module".to_owned()); - } - Sanitizer::Memory => { - modules_config.passes.push("msan".to_owned()) - } - Sanitizer::Thread => { - modules_config.passes.push("tsan".to_owned()) - } - _ => {} - } - } - - if sess.opts.debugging_opts.profile { - modules_config.passes.push("insert-gcov-profiling".to_owned()) - } - - modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); - modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); - - modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); - modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); - - // Save all versions of the bytecode if we're saving our temporaries. - if sess.opts.cg.save_temps { - modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; - modules_config.emit_bc = true; - modules_config.emit_lto_bc = true; - metadata_config.emit_bc = true; - allocator_config.emit_bc = true; - } - - // Emit compressed bitcode files for the crate if we're emitting an rlib. - // Whenever an rlib is created, the bitcode is inserted into the archive in - // order to allow LTO against it. 
- if need_crate_bitcode_for_rlib(sess) { - modules_config.emit_bc_compressed = true; - allocator_config.emit_bc_compressed = true; - } - - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); - - modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || - tcx.sess.target.target.options.no_integrated_as; - - for output_type in sess.opts.output_types.keys() { - match *output_type { - OutputType::Bitcode => { modules_config.emit_bc = true; } - OutputType::LlvmAssembly => { modules_config.emit_ir = true; } - OutputType::Assembly => { - modules_config.emit_asm = true; - // If we're not using the LLVM assembler, this function - // could be invoked specially with output_type_assembly, so - // in this case we still want the metadata object file. - if !sess.opts.output_types.contains_key(&OutputType::Assembly) { - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - } - } - OutputType::Object => { modules_config.emit_obj = true; } - OutputType::Metadata => { metadata_config.emit_obj = true; } - OutputType::Exe => { - modules_config.emit_obj = true; - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - }, - OutputType::Mir => {} - OutputType::DepInfo => {} - } - } - - modules_config.set_flags(sess, no_builtins); - metadata_config.set_flags(sess, no_builtins); - allocator_config.set_flags(sess, no_builtins); - - // Exclude metadata and allocator modules from time_passes output, since - // they throw off the "LLVM passes" measurement. 
- metadata_config.time_passes = false; - allocator_config.time_passes = false; - - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (codegen_worker_send, codegen_worker_receive) = channel(); - - let coordinator_thread = start_executing_work(tcx, - &crate_info, - shared_emitter, - codegen_worker_send, - coordinator_receive, - total_cgus, - sess.jobserver.clone(), - time_graph.clone(), - Arc::new(modules_config), - Arc::new(metadata_config), - Arc::new(allocator_config)); - - OngoingCodegen { - crate_name, - crate_hash, - metadata, - windows_subsystem, - linker_info, - crate_info, - - time_graph, - coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), - codegen_worker_receive, - shared_emitter_main, - future: coordinator_thread, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - } -} - -fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( - sess: &Session, - compiled_modules: &CompiledModules, -) -> FxHashMap { - let mut work_products = FxHashMap::default(); - - if sess.opts.incremental.is_none() { - return work_products; - } - - for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { - let mut files = vec![]; - - if let Some(ref path) = module.object { - files.push((WorkProductFileKind::Object, path.clone())); - } - if let Some(ref path) = module.bytecode { - files.push((WorkProductFileKind::Bytecode, path.clone())); - } - if let Some(ref path) = module.bytecode_compressed { - files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); - } - - if let Some((id, product)) = - copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { - work_products.insert(id, product); - } - } - - work_products -} - -fn produce_final_output_artifacts(sess: &Session, - compiled_modules: &CompiledModules, - crate_output: &OutputFilenames) { - let mut user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. 
- let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if compiled_modules.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&compiled_modules.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - remove(sess, &path); - } - } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) - } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. 
There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} - } - } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). - - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. 
- let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; - - let keep_numbered_objects = needs_crate_object || - (user_wants_objects && sess.codegen_units() > 1); - - for module in compiled_modules.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - remove(sess, path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - remove(sess, path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref path) = compiled_modules.metadata_module.bytecode { - remove(sess, &path); - } - - if let Some(ref allocator_module) = compiled_modules.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - remove(sess, path); - } - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. -} - -pub(crate) fn dump_incremental_data(_codegen_results: &CodegenResults) { - // FIXME(mw): This does not work at the moment because the situation has - // become more complicated due to incremental LTO. Now a CGU - // can have more than two caching states. - // println!("[incremental] Re-using {} out of {} modules", - // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), - // codegen_results.modules.len()); -} - -enum WorkItem { - /// Optimize a newly codegened, totally unoptimized module. - Optimize(ModuleCodegen), - /// Copy the post-LTO artifacts from the incremental cache to the output - /// directory. - CopyPostLtoArtifacts(CachedModuleCodegen), - /// Perform (Thin)LTO on the given module. 
- LTO(lto::LtoModuleCodegen), -} - -impl WorkItem { - fn module_kind(&self) -> ModuleKind { - match *self { - WorkItem::Optimize(ref m) => m.kind, - WorkItem::CopyPostLtoArtifacts(_) | - WorkItem::LTO(_) => ModuleKind::Regular, - } - } - - fn name(&self) -> String { - match *self { - WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), - WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), - WorkItem::LTO(ref m) => format!("lto: {}", m.name()), - } - } -} - -enum WorkItemResult { - Compiled(CompiledModule), - NeedsLTO(ModuleCodegen), -} - -fn execute_work_item(cgcx: &CodegenContext, - work_item: WorkItem, - timeline: &mut Timeline) - -> Result -{ - let module_config = cgcx.config(work_item.module_kind()); - - match work_item { - WorkItem::Optimize(module) => { - execute_optimize_work_item(cgcx, module, module_config, timeline) - } - WorkItem::CopyPostLtoArtifacts(module) => { - execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) - } - WorkItem::LTO(module) => { - execute_lto_work_item(cgcx, module, module_config, timeline) - } - } -} - -fn execute_optimize_work_item(cgcx: &CodegenContext, - module: ModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - optimize(cgcx, &diag_handler, &module, module_config, timeline)?; - } - - let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); - - // After we've done the initial round of optimizations we need to - // decide whether to synchronously codegen this module or ship it - // back to the coordinator thread for further LTO processing (which - // has to wait for all the initial modules to be optimized). - // - // Here we dispatch based on the `cgcx.lto` and kind of module we're - // codegenning... - let needs_lto = match cgcx.lto { - Lto::No => false, - - // If the linker does LTO, we don't have to do it. 
Note that we - // keep doing full LTO, if it is requested, as not to break the - // assumption that the output will be a single module. - Lto::Thin | Lto::ThinLocal if linker_does_lto => false, - - // Here we've got a full crate graph LTO requested. We ignore - // this, however, if the crate type is only an rlib as there's - // no full crate graph to process, that'll happen later. - // - // This use case currently comes up primarily for targets that - // require LTO so the request for LTO is always unconditionally - // passed down to the backend, but we don't actually want to do - // anything about it yet until we've got a final product. - Lto::Fat | Lto::Thin => { - cgcx.crate_types.len() != 1 || - cgcx.crate_types[0] != config::CrateType::Rlib - } - - // When we're automatically doing ThinLTO for multi-codegen-unit - // builds we don't actually want to LTO the allocator modules if - // it shows up. This is due to various linker shenanigans that - // we'll encounter later. - // - // Additionally here's where we also factor in the current LLVM - // version. If it doesn't support ThinLTO we skip this. - Lto::ThinLocal => { - module.kind != ModuleKind::Allocator && - unsafe { llvm::LLVMRustThinLTOAvailable() } - } - }; - - // Metadata modules never participate in LTO regardless of the lto - // settings. - let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; - - if needs_lto { - Ok(WorkItemResult::NeedsLTO(module)) - } else { - let module = unsafe { - codegen(cgcx, &diag_handler, module, module_config, timeline)? 
- }; - Ok(WorkItemResult::Compiled(module)) - } -} - -fn execute_copy_from_cache_work_item(cgcx: &CodegenContext, - module: CachedModuleCodegen, - module_config: &ModuleConfig, - _: &mut Timeline) - -> Result -{ - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let mut object = None; - let mut bytecode = None; - let mut bytecode_compressed = None; - for (kind, saved_file) in &module.source.saved_files { - let obj_out = match kind { - WorkProductFileKind::Object => { - let path = cgcx.output_filenames.temp_path(OutputType::Object, - Some(&module.name)); - object = Some(path.clone()); - path - } - WorkProductFileKind::Bytecode => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)); - bytecode = Some(path.clone()); - path - } - WorkProductFileKind::BytecodeCompressed => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)) - .with_extension(RLIB_BYTECODE_EXTENSION); - bytecode_compressed = Some(path.clone()); - path - } - }; - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - module.name, - source_file, - obj_out.display()); - match link_or_copy(&source_file, &obj_out) { - Ok(_) => { } - Err(err) => { - let diag_handler = cgcx.create_diag_handler(); - diag_handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } - } - - assert_eq!(object.is_some(), module_config.emit_obj); - assert_eq!(bytecode.is_some(), module_config.emit_bc); - assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); - - Ok(WorkItemResult::Compiled(CompiledModule { - name: module.name, - kind: ModuleKind::Regular, - object, - bytecode, - bytecode_compressed, - })) -} - -fn execute_lto_work_item(cgcx: &CodegenContext, - mut module: lto::LtoModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - 
let diag_handler = cgcx.create_diag_handler(); - - unsafe { - let module = module.optimize(cgcx, timeline)?; - let module = codegen(cgcx, &diag_handler, module, module_config, timeline)?; - Ok(WorkItemResult::Compiled(module)) - } -} - -enum Message { - Token(io::Result), - NeedsLTO { - result: ModuleCodegen, - worker_id: usize, - }, - Done { - result: Result, - worker_id: usize, - }, - CodegenDone { - llvm_work_item: WorkItem, - cost: u64, - }, - AddImportOnlyModule { - module_data: SerializedModule, - work_product: WorkProduct, - }, - CodegenComplete, - CodegenItem, -} - -struct Diagnostic { - msg: String, - code: Option, - lvl: Level, -} - -#[derive(PartialEq, Clone, Copy, Debug)] -enum MainThreadWorkerState { - Idle, - Codegenning, - LLVMing, -} - -fn start_executing_work(tcx: TyCtxt, - crate_info: &CrateInfo, - shared_emitter: SharedEmitter, - codegen_worker_send: Sender, - coordinator_receive: Receiver>, - total_cgus: usize, - jobserver: Client, - time_graph: Option, - modules_config: Arc, - metadata_config: Arc, - allocator_config: Arc) - -> thread::JoinHandle> { - let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); - let sess = tcx.sess; - - // Compute the set of symbols we need to retain when doing LTO (if we need to) - let exported_symbols = { - let mut exported_symbols = FxHashMap::default(); - - let copy_symbols = |cnum| { - let symbols = tcx.exported_symbols(cnum) - .iter() - .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) - .collect(); - Arc::new(symbols) - }; - - match sess.lto() { - Lto::No => None, - Lto::ThinLocal => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - Some(Arc::new(exported_symbols)) - } - Lto::Fat | Lto::Thin => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - for &cnum in tcx.crates().iter() { - exported_symbols.insert(cnum, copy_symbols(cnum)); - } - Some(Arc::new(exported_symbols)) - } - } - }; - - // First up, convert our jobserver into a helper thread so we can use 
normal - // mpsc channels to manage our messages and such. - // After we've requested tokens then we'll, when we can, - // get tokens on `coordinator_receive` which will - // get managed in the main loop below. - let coordinator_send2 = coordinator_send.clone(); - let helper = jobserver.into_helper_thread(move |token| { - drop(coordinator_send2.send(Box::new(Message::Token(token)))); - }).expect("failed to spawn helper thread"); - - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { - if link::ignored_for_lto(sess, crate_info, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - - let assembler_cmd = if modules_config.no_integrated_as { - // HACK: currently we use linker (gcc) as our assembler - let (linker, flavor) = link::linker_and_flavor(sess); - - let (name, mut cmd) = get_linker(sess, &linker, flavor); - cmd.args(&sess.target.target.options.asm_args); - Some(Arc::new(AssemblerCommand { - name, - cmd, - })) - } else { - None - }; - - let cgcx = CodegenContext { - crate_types: sess.crate_types.borrow().clone(), - each_linked_rlib_for_lto, - lto: sess.lto(), - no_landing_pads: sess.no_landing_pads(), - fewer_names: sess.fewer_names(), - save_temps: sess.opts.cg.save_temps, - opts: Arc::new(sess.opts.clone()), - time_passes: sess.time_passes(), - exported_symbols, - plugin_passes: sess.plugin_llvm_passes.borrow().clone(), - remark: sess.opts.cg.remark.clone(), - worker: 0, - incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), - cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), - coordinator_send, - diag_emitter: shared_emitter.clone(), - time_graph, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - regular_module_config: modules_config, - metadata_module_config: metadata_config, - allocator_module_config: allocator_config, - tm_factory: target_machine_factory(tcx.sess, false), - total_cgus, - msvc_imps_needed: 
msvc_imps_needed(tcx), - target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), - debuginfo: tcx.sess.opts.debuginfo, - assembler_cmd, - }; - - // This is the "main loop" of parallel work happening for parallel codegen. - // It's here that we manage parallelism, schedule work, and work with - // messages coming from clients. - // - // There are a few environmental pre-conditions that shape how the system - // is set up: - // - // - Error reporting only can happen on the main thread because that's the - // only place where we have access to the compiler `Session`. - // - LLVM work can be done on any thread. - // - Codegen can only happen on the main thread. - // - Each thread doing substantial work most be in possession of a `Token` - // from the `Jobserver`. - // - The compiler process always holds one `Token`. Any additional `Tokens` - // have to be requested from the `Jobserver`. - // - // Error Reporting - // =============== - // The error reporting restriction is handled separately from the rest: We - // set up a `SharedEmitter` the holds an open channel to the main thread. - // When an error occurs on any thread, the shared emitter will send the - // error message to the receiver main thread (`SharedEmitterMain`). The - // main thread will periodically query this error message queue and emit - // any error messages it has received. It might even abort compilation if - // has received a fatal error. In this case we rely on all other threads - // being torn down automatically with the main thread. - // Since the main thread will often be busy doing codegen work, error - // reporting will be somewhat delayed, since the message queue can only be - // checked in between to work packages. 
- // - // Work Processing Infrastructure - // ============================== - // The work processing infrastructure knows three major actors: - // - // - the coordinator thread, - // - the main thread, and - // - LLVM worker threads - // - // The coordinator thread is running a message loop. It instructs the main - // thread about what work to do when, and it will spawn off LLVM worker - // threads as open LLVM WorkItems become available. - // - // The job of the main thread is to codegen CGUs into LLVM work package - // (since the main thread is the only thread that can do this). The main - // thread will block until it receives a message from the coordinator, upon - // which it will codegen one CGU, send it to the coordinator and block - // again. This way the coordinator can control what the main thread is - // doing. - // - // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is - // available, it will spawn off a new LLVM worker thread and let it process - // that a WorkItem. When a LLVM worker thread is done with its WorkItem, - // it will just shut down, which also frees all resources associated with - // the given LLVM module, and sends a message to the coordinator that the - // has been completed. - // - // Work Scheduling - // =============== - // The scheduler's goal is to minimize the time it takes to complete all - // work there is, however, we also want to keep memory consumption low - // if possible. These two goals are at odds with each other: If memory - // consumption were not an issue, we could just let the main thread produce - // LLVM WorkItems at full speed, assuring maximal utilization of - // Tokens/LLVM worker threads. However, since codegen usual is faster - // than LLVM processing, the queue of LLVM WorkItems would fill up and each - // WorkItem potentially holds on to a substantial amount of memory. 
- // - // So the actual goal is to always produce just enough LLVM WorkItems as - // not to starve our LLVM worker threads. That means, once we have enough - // WorkItems in our queue, we can block the main thread, so it does not - // produce more until we need them. - // - // Doing LLVM Work on the Main Thread - // ---------------------------------- - // Since the main thread owns the compiler processes implicit `Token`, it is - // wasteful to keep it blocked without doing any work. Therefore, what we do - // in this case is: We spawn off an additional LLVM worker thread that helps - // reduce the queue. The work it is doing corresponds to the implicit - // `Token`. The coordinator will mark the main thread as being busy with - // LLVM work. (The actual work happens on another OS thread but we just care - // about `Tokens`, not actual threads). - // - // When any LLVM worker thread finishes while the main thread is marked as - // "busy with LLVM work", we can do a little switcheroo: We give the Token - // of the just finished thread to the LLVM worker thread that is working on - // behalf of the main thread's implicit Token, thus freeing up the main - // thread again. The coordinator can then again decide what the main thread - // should do. This allows the coordinator to make decisions at more points - // in time. - // - // Striking a Balance between Throughput and Memory Consumption - // ------------------------------------------------------------ - // Since our two goals, (1) use as many Tokens as possible and (2) keep - // memory consumption as low as possible, are in conflict with each other, - // we have to find a trade off between them. Right now, the goal is to keep - // all workers busy, which means that no worker should find the queue empty - // when it is ready to start. - // How do we do achieve this? 
Good question :) We actually never know how - // many `Tokens` are potentially available so it's hard to say how much to - // fill up the queue before switching the main thread to LLVM work. Also we - // currently don't have a means to estimate how long a running LLVM worker - // will still be busy with it's current WorkItem. However, we know the - // maximal count of available Tokens that makes sense (=the number of CPU - // cores), so we can take a conservative guess. The heuristic we use here - // is implemented in the `queue_full_enough()` function. - // - // Some Background on Jobservers - // ----------------------------- - // It's worth also touching on the management of parallelism here. We don't - // want to just spawn a thread per work item because while that's optimal - // parallelism it may overload a system with too many threads or violate our - // configuration for the maximum amount of cpu to use for this process. To - // manage this we use the `jobserver` crate. - // - // Job servers are an artifact of GNU make and are used to manage - // parallelism between processes. A jobserver is a glorified IPC semaphore - // basically. Whenever we want to run some work we acquire the semaphore, - // and whenever we're done with that work we release the semaphore. In this - // manner we can ensure that the maximum number of parallel workers is - // capped at any one point in time. - // - // LTO and the coordinator thread - // ------------------------------ - // - // The final job the coordinator thread is responsible for is managing LTO - // and how that works. When LTO is requested what we'll to is collect all - // optimized LLVM modules into a local vector on the coordinator. Once all - // modules have been codegened and optimized we hand this to the `lto` - // module for further optimization. The `lto` module will return back a list - // of more modules to work on, which the coordinator will continue to spawn - // work for. 
- // - // Each LLVM module is automatically sent back to the coordinator for LTO if - // necessary. There's already optimizations in place to avoid sending work - // back to the coordinator if LTO isn't requested. - return thread::spawn(move || { - // We pretend to be within the top-level LLVM time-passes task here: - set_time_depth(1); - - let max_workers = ::num_cpus::get(); - let mut worker_id_counter = 0; - let mut free_worker_ids = Vec::new(); - let mut get_worker_id = |free_worker_ids: &mut Vec| { - if let Some(id) = free_worker_ids.pop() { - id - } else { - let id = worker_id_counter; - worker_id_counter += 1; - id - } - }; - - // This is where we collect codegen units that have gone all the way - // through codegen and LLVM. - let mut compiled_modules = vec![]; - let mut compiled_metadata_module = None; - let mut compiled_allocator_module = None; - let mut needs_lto = Vec::new(); - let mut lto_import_only_modules = Vec::new(); - let mut started_lto = false; - - // This flag tracks whether all items have gone through codegens - let mut codegen_done = false; - - // This is the queue of LLVM work items that still need processing. - let mut work_items = Vec::<(WorkItem, u64)>::new(); - - // This are the Jobserver Tokens we currently hold. Does not include - // the implicit Token the compiler process owns no matter what. - let mut tokens = Vec::new(); - - let mut main_thread_worker_state = MainThreadWorkerState::Idle; - let mut running = 0; - - let mut llvm_start_time = None; - - // Run the message loop while there's still anything that needs message - // processing: - while !codegen_done || - work_items.len() > 0 || - running > 0 || - needs_lto.len() > 0 || - lto_import_only_modules.len() > 0 || - main_thread_worker_state != MainThreadWorkerState::Idle { - - // While there are still CGUs to be codegened, the coordinator has - // to decide how to utilize the compiler processes implicit Token: - // For codegenning more CGU or for running them through LLVM. 
- if !codegen_done { - if main_thread_worker_state == MainThreadWorkerState::Idle { - if !queue_full_enough(work_items.len(), running, max_workers) { - // The queue is not full enough, codegen more items: - if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { - panic!("Could not send Message::CodegenItem to main thread") - } - main_thread_worker_state = MainThreadWorkerState::Codegenning; - } else { - // The queue is full enough to not let the worker - // threads starve. Use the implicit Token to do some - // LLVM work too. - let (item, _) = work_items.pop() - .expect("queue empty - queue_full_enough() broken?"); - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } - } - } else { - // If we've finished everything related to normal codegen - // then it must be the case that we've got some LTO work to do. 
- // Perform the serial work here of figuring out what we're - // going to LTO and then push a bunch of work items onto our - // queue to do LTO - if work_items.len() == 0 && - running == 0 && - main_thread_worker_state == MainThreadWorkerState::Idle { - assert!(!started_lto); - assert!(needs_lto.len() + lto_import_only_modules.len() > 0); - started_lto = true; - let modules = mem::replace(&mut needs_lto, Vec::new()); - let import_only_modules = - mem::replace(&mut lto_import_only_modules, Vec::new()); - for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { - let insertion_index = work_items - .binary_search_by_key(&cost, |&(_, cost)| cost) - .unwrap_or_else(|e| e); - work_items.insert(insertion_index, (work, cost)); - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - } - } - - // In this branch, we know that everything has been codegened, - // so it's just a matter of determining whether the implicit - // Token is free to use for LLVM work. - match main_thread_worker_state { - MainThreadWorkerState::Idle => { - if let Some((item, _)) = work_items.pop() { - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } else { - // There is no unstarted work, so let the main thread - // take over for a running worker. Otherwise the - // implicit token would just go to waste. - // We reduce the `running` counter by one. The - // `tokens.truncate()` below will take care of - // giving the Token back. 
- debug_assert!(running > 0); - running -= 1; - main_thread_worker_state = MainThreadWorkerState::LLVMing; - } - } - MainThreadWorkerState::Codegenning => { - bug!("codegen worker should not be codegenning after \ - codegen was already completed") - } - MainThreadWorkerState::LLVMing => { - // Already making good use of that token - } - } - } - - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() { - let (item, _) = work_items.pop().unwrap(); - - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - - spawn_work(cgcx, item); - running += 1; - } - - // Relinquish accidentally acquired extra tokens - tokens.truncate(running); - - let msg = coordinator_receive.recv().unwrap(); - match *msg.downcast::().ok().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - match token { - Ok(token) => { - tokens.push(token); - - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - // If the main thread token is used for LLVM work - // at the moment, we turn that thread into a regular - // LLVM worker thread, so the main thread is free - // to react to codegen demand. - main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; - } - } - Err(e) => { - let msg = &format!("failed to acquire jobserver token: {}", e); - shared_emitter.fatal(msg); - // Exit the coordinator thread - panic!("{}", msg) - } - } - } - - Message::CodegenDone { llvm_work_item, cost } => { - // We keep the queue sorted by estimated processing cost, - // so that more expensive items are processed earlier. 
This - // is good for throughput as it gives the main thread more - // time to fill up the queue and it avoids scheduling - // expensive items to the end. - // Note, however, that this is not ideal for memory - // consumption, as LLVM module sizes are not evenly - // distributed. - let insertion_index = - work_items.binary_search_by_key(&cost, |&(_, cost)| cost); - let insertion_index = match insertion_index { - Ok(idx) | Err(idx) => idx - }; - work_items.insert(insertion_index, (llvm_work_item, cost)); - - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - Message::CodegenComplete => { - codegen_done = true; - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. 
- Message::Done { result: Ok(compiled_module), worker_id } => { - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - - free_worker_ids.push(worker_id); - - match compiled_module.kind { - ModuleKind::Regular => { - compiled_modules.push(compiled_module); - } - ModuleKind::Metadata => { - assert!(compiled_metadata_module.is_none()); - compiled_metadata_module = Some(compiled_module); - } - ModuleKind::Allocator => { - assert!(compiled_allocator_module.is_none()); - compiled_allocator_module = Some(compiled_module); - } - } - } - Message::NeedsLTO { result, worker_id } => { - assert!(!started_lto); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - free_worker_ids.push(worker_id); - needs_lto.push(result); - } - Message::AddImportOnlyModule { module_data, work_product } => { - assert!(!started_lto); - assert!(!codegen_done); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - lto_import_only_modules.push((module_data, work_product)); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - Message::Done { result: Err(()), worker_id: _ } => { - bug!("worker thread panicked"); - } - Message::CodegenItem => { - bug!("the coordinator should not receive codegen requests") - } - } - } - - if let Some(llvm_start_time) = llvm_start_time { - let total_llvm_time = Instant::now().duration_since(llvm_start_time); - // This is the top-level timing for all of LLVM, set the time-depth - // to zero. - set_time_depth(0); - print_time_passes_entry(cgcx.time_passes, - "LLVM passes", - total_llvm_time); - } - - // Regardless of what order these modules completed in, report them to - // the backend in the same order every time to ensure that we're handing - // out deterministic results. 
- compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); - - let compiled_metadata_module = compiled_metadata_module - .expect("Metadata module not compiled?"); - - Ok(CompiledModules { - modules: compiled_modules, - metadata_module: compiled_metadata_module, - allocator_module: compiled_allocator_module, - }) - }); - - // A heuristic that determines if we have enough LLVM WorkItems in the - // queue so that the main thread can do LLVM work instead of codegen - fn queue_full_enough(items_in_queue: usize, - workers_running: usize, - max_workers: usize) -> bool { - // Tune me, plz. - items_in_queue > 0 && - items_in_queue >= max_workers.saturating_sub(workers_running / 2) - } - - fn maybe_start_llvm_timer(config: &ModuleConfig, - llvm_start_time: &mut Option) { - // We keep track of the -Ztime-passes output manually, - // since the closure-based interface does not fit well here. - if config.time_passes { - if llvm_start_time.is_none() { - *llvm_start_time = Some(Instant::now()); - } - } - } -} - -pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); - -fn spawn_work(cgcx: CodegenContext, work: WorkItem) { - let depth = time_depth(); - - thread::spawn(move || { - set_time_depth(depth); - - // Set up a destructor which will fire off a message that we're done as - // we exit. 
- struct Bomb { - coordinator_send: Sender>, - result: Option, - worker_id: usize, - } - impl Drop for Bomb { - fn drop(&mut self) { - let worker_id = self.worker_id; - let msg = match self.result.take() { - Some(WorkItemResult::Compiled(m)) => { - Message::Done { result: Ok(m), worker_id } - } - Some(WorkItemResult::NeedsLTO(m)) => { - Message::NeedsLTO { result: m, worker_id } - } - None => Message::Done { result: Err(()), worker_id } - }; - drop(self.coordinator_send.send(Box::new(msg))); - } - } - - let mut bomb = Bomb { - coordinator_send: cgcx.coordinator_send.clone(), - result: None, - worker_id: cgcx.worker, - }; - // Execute the work itself, and if it finishes successfully then flag - // ourselves as a success as well. - // - // Note that we ignore any `FatalError` coming out of `execute_work_item`, - // as a diagnostic was already sent off to the main thread - just - // surface that there was an error in this worker. - bomb.result = { - let timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND, - &work.name()) - }); - let mut timeline = timeline.unwrap_or(Timeline::noop()); - execute_work_item(&cgcx, work, &mut timeline).ok() - }; - }); -} - -pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) { - let assembler = cgcx.assembler_cmd - .as_ref() - .expect("cgcx.assembler_cmd is missing?"); - - let pname = &assembler.name; - let mut cmd = assembler.cmd.clone(); - cmd.arg("-c").arg("-o").arg(object).arg(assembly); - debug!("{:?}", cmd); - - match cmd.output() { - Ok(prog) => { - if !prog.status.success() { - let mut note = prog.stderr.clone(); - note.extend_from_slice(&prog.stdout); - - handler.struct_err(&format!("linking with `{}` failed: {}", - pname.display(), - prog.status)) - .note(&format!("{:?}", &cmd)) - .note(str::from_utf8(¬e[..]).unwrap()) - .emit(); - handler.abort_if_errors(); - } - }, - Err(e) => { - handler.err(&format!("could not exec 
the linker `{}`: {}", pname.display(), e)); - handler.abort_if_errors(); - } - } -} pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, config: &ModuleConfig, @@ -2189,7 +703,7 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, // reasonable defaults and prepare it to actually populate the pass // manager. let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); + let opt_size = config.opt_size.map(get_llvm_opt_size).unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { @@ -2258,276 +772,16 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, } -enum SharedEmitterMessage { - Diagnostic(Diagnostic), - InlineAsmError(u32, String), - AbortIfErrors, - Fatal(String), -} - -#[derive(Clone)] -pub struct SharedEmitter { - sender: Sender, -} - -pub struct SharedEmitterMain { - receiver: Receiver, -} - -impl SharedEmitter { - pub fn new() -> (SharedEmitter, SharedEmitterMain) { - let (sender, receiver) = channel(); - - (SharedEmitter { sender }, SharedEmitterMain { receiver }) - } - - fn inline_asm_error(&self, cookie: u32, msg: String) { - drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); - } - - fn fatal(&self, msg: &str) { - drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); - } -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); - } - drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); - } -} - -impl SharedEmitterMain { - pub fn check(&self, sess: &Session, blocking: bool) { - loop { - let message = if blocking { - match 
self.receiver.recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - } else { - match self.receiver.try_recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - }; - - match message { - Ok(SharedEmitterMessage::Diagnostic(diag)) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - code.clone(), - diag.lvl); - } - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - } - } - } - Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), - } - } - Ok(SharedEmitterMessage::AbortIfErrors) => { - sess.abort_if_errors(); - } - Ok(SharedEmitterMessage::Fatal(msg)) => { - sess.fatal(&msg); - } - Err(_) => { - break; - } - } - - } - } -} - -pub struct OngoingCodegen { - crate_name: Symbol, - crate_hash: Svh, - metadata: EncodedMetadata, - windows_subsystem: Option, - linker_info: LinkerInfo, - crate_info: CrateInfo, - time_graph: Option, - coordinator_send: Sender>, - codegen_worker_receive: Receiver, - shared_emitter_main: SharedEmitterMain, - future: thread::JoinHandle>, - output_filenames: Arc, -} - -impl OngoingCodegen { - pub(crate) fn join( - self, - sess: &Session - ) -> (CodegenResults, FxHashMap) { - self.shared_emitter_main.check(sess, true); - let compiled_modules = match self.future.join() { - Ok(Ok(compiled_modules)) => compiled_modules, - Ok(Err(())) => { - sess.abort_if_errors(); - panic!("expected abort due to worker thread errors") - }, - Err(_) => { - bug!("panic during codegen/LLVM phase"); - } - }; - - sess.cgu_reuse_tracker.check_expected_reuse(sess); - - sess.abort_if_errors(); - - if let Some(time_graph) = self.time_graph { - time_graph.dump(&format!("{}-timings", self.crate_name)); - } - - let work_products = - copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, - &compiled_modules); - 
produce_final_output_artifacts(sess, - &compiled_modules, - &self.output_filenames); - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.codegen_units() == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - (CodegenResults { - crate_name: self.crate_name, - crate_hash: self.crate_hash, - metadata: self.metadata, - windows_subsystem: self.windows_subsystem, - linker_info: self.linker_info, - crate_info: self.crate_info, - - modules: compiled_modules.modules, - allocator_module: compiled_modules.allocator_module, - metadata_module: compiled_modules.metadata_module, - }, work_products) - } - - pub(crate) fn submit_pre_codegened_module_to_llvm(&self, - tcx: TyCtxt, - module: ModuleCodegen) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - - // These are generally cheap and won't through off scheduling. - let cost = 0; - submit_codegened_module_to_llvm(tcx, module, cost); - } - - pub fn codegen_finished(&self, tcx: TyCtxt) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - drop(self.coordinator_send.send(Box::new(Message::CodegenComplete))); - } - - pub fn check_for_errors(&self, sess: &Session) { - self.shared_emitter_main.check(sess, false); - } - - pub fn wait_for_signal_to_codegen_item(&self) { - match self.codegen_worker_receive.recv() { - Ok(Message::CodegenItem) => { - // Nothing to do - } - Ok(_) => panic!("unexpected message"), - Err(_) => { - // One of the LLVM threads must have panicked, fall through so - // error handling can be reached. 
- } - } - } -} - -pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, - module: ModuleCodegen, - cost: u64) { - let llvm_work_item = WorkItem::Optimize(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost, - }))); -} - -pub(crate) fn submit_post_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost: 0, - }))); -} - -pub(crate) fn submit_pre_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let filename = pre_lto_bitcode_filename(&module.name); - let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); - let file = fs::File::open(&bc_path).unwrap_or_else(|e| { - panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) - }); - - let mmap = unsafe { - memmap::Mmap::map(&file).unwrap_or_else(|e| { - panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) - }) - }; - - // Schedule the module to be loaded - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule { - module_data: SerializedModule::FromUncompressedFile(mmap), - work_product: module.source, - }))); -} - -pub(super) fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) -} - -fn msvc_imps_needed(tcx: TyCtxt) -> bool { - // This should never be true (because it's not supported). If it is true, - // something is wrong with commandline arg validation. - assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.opts.cg.prefer_dynamic)); - - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the `__imp_` symbols. 
Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() -} - // Create a `__imp_ = &symbol` global for every public static `symbol`. // This is required to satisfy `dllimport` references to static data in .rlibs // when using MSVC linker. We do this only for data, as linker can fix up // code references on its own. // See #26591, #27438 -fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { +fn create_msvc_imps( + cgcx: &CodegenContext, + llcx: &llvm::Context, + llmod: &llvm::Module +) { if !cgcx.msvc_imps_needed { return } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index d908e73d14e04..eeab1ab51350f 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -28,7 +28,6 @@ use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use super::LlvmCodegenBackend; -use back::write; use llvm; use metadata; use rustc::mir::mono::{Linkage, Visibility, Stats}; @@ -44,6 +43,7 @@ use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_data_structures::small_c_str::SmallCStr; use rustc_codegen_ssa::interfaces::*; +use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm; use std::ffi::CString; use std::time::Instant; @@ -54,7 +54,7 @@ use value::Value; -pub(crate) fn write_metadata<'a, 'gcx>( +pub fn write_metadata<'a, 'gcx>( tcx: TyCtxt<'a, 'gcx, 'gcx>, llvm_module: &ModuleLlvm ) -> EncodedMetadata { @@ -164,9 +164,7 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64; - write::submit_codegened_module_to_llvm(tcx, - module, - cost); + submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost); return stats; fn module_codegen<'ll, 'tcx>( diff --git 
a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 43b9911e14338..38d3276faa2e9 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -42,7 +42,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati let pointer_size = layout.pointer_size.bytes() as usize; let mut next_offset = 0; - for &(offset, alloc_id) in alloc.relocations.iter() { + for &(offset, ((), alloc_id)) in alloc.relocations.iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; @@ -54,7 +54,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocati &alloc.bytes[offset..(offset + pointer_size)], ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; llvals.push(cx.scalar_to_backend( - Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(), + Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), &layout::Scalar { value: layout::Primitive::Pointer, valid_range: 0..=!0 diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 024b3613b6075..dc37d351b403f 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -317,9 +317,9 @@ impl<'ll, 'tcx, Value : Eq+Hash> CodegenCx<'ll, 'tcx, Value> { } impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - fn vtables(&self) -> &RefCell, - Option>), &'ll Value>> - { + fn vtables(&self) -> &RefCell< + FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), &'ll Value> + > { &self.vtables } @@ -478,7 +478,7 @@ impl IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> { return v; } - declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) + self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } fn declare_intrinsic( diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs 
b/src/librustc_codegen_llvm/debuginfo/mod.rs index 256d6aa8fcf3f..f2ee7576431da 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -405,7 +405,7 @@ impl<'ll, 'tcx: 'll> DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll V } } - create_DIArray(DIB(cx), &signature[..]); + create_DIArray(DIB(cx), &signature[..]) } fn get_template_parameters<'ll, 'tcx>( diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs index 5721938c9c0a7..5ceb53a40b407 100644 --- a/src/librustc_codegen_llvm/diagnostics.rs +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -45,39 +45,6 @@ extern "platform-intrinsic" { unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok! ``` -"##, - -E0668: r##" -Malformed inline assembly rejected by LLVM. - -LLVM checks the validity of the constraints and the assembly string passed to -it. This error implies that LLVM seems something wrong with the inline -assembly call. - -In particular, it can happen if you forgot the closing bracket of a register -constraint (see issue #51430): -```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) -#![feature(asm)] - -fn main() { - let rax: u64; - unsafe { - asm!("" :"={rax"(rax)); - println!("Accumulator is: {}", rax); - } -} -``` -"##, - -E0669: r##" -Cannot convert inline assembly operand to a single LLVM value. - -This error usually happens when trying to pass in a value to an input inline -assembly operand that is actually a pair of values. In particular, this can -happen when trying to pass in a slice, for instance a `&str`. In Rust, these -values are represented internally as a pair of values, the pointer and its -length. When passed as an input operand, this pair of values can not be -coerced into a register and thus we must fail with an error. 
-"##, +"## } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index dfdfd46bcc3b5..1ae4093c6a1e3 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -98,6 +98,11 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let cx = self.cx(); let tcx = cx.tcx; + let (def_id, substs) = match callee_ty.sty { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty) + }; + let sig = callee_ty.fn_sig(tcx); let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); let arg_tys = sig.inputs(); @@ -110,20 +115,20 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> let simple = get_simple_intrinsic(cx, name); let llval = match name { _ if simple.is_some() => { - bx.call(simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::>(), - None) + self.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } "unreachable" => { return; }, "likely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(true)], None) + self.call(expect, &[args[0].immediate(), cx.const_bool(true)], None) } "unlikely" => { let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None) + self.call(expect, &[args[0].immediate(), cx.const_bool(false)], None) } "try" => { try_intrinsic(self, cx, @@ -135,7 +140,7 @@ impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> } "breakpoint" => { let llfn = cx.get_intrinsic(&("llvm.debugtrap")); - bx.call(llfn, &[], None) + self.call(llfn, &[], None) } "size_of" => { let tp_ty = substs.type_at(0); diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index d11af9fa64b84..e2ca821ecb2a5 100644 --- a/src/librustc_codegen_llvm/lib.rs 
+++ b/src/librustc_codegen_llvm/lib.rs @@ -68,15 +68,17 @@ extern crate tempfile; extern crate memmap; use rustc_codegen_ssa::interfaces::*; -use time_graph::TimeGraph; -use std::sync::mpsc::Receiver; -use back::write::{self, OngoingCodegen}; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, DiagnosticHandlers}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use rustc_codegen_ssa::CompiledModule; +use errors::{FatalError, Handler}; +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; use syntax_pos::symbol::InternedString; use rustc::mir::mono::Stats; - pub use llvm_util::target_features; use std::any::Any; -use std::sync::mpsc; +use std::sync::{mpsc, Arc}; use rustc::dep_graph::DepGraph; use rustc::middle::allocator::AllocatorKind; @@ -87,9 +89,8 @@ use rustc::ty::{self, TyCtxt}; use rustc::util::time_graph; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; -use rustc_codegen_ssa::{ModuleCodegen, CompiledModule, CachedModuleCodegen, CrateInfo}; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_utils::codegen_backend::CodegenBackend; -use rustc_data_structures::svh::Svh; mod diagnostics; @@ -97,11 +98,8 @@ mod back { pub use rustc_codegen_utils::symbol_names; mod archive; pub mod bytecode; - mod command; - pub mod linker; pub mod link; pub mod lto; - pub mod symbol_export; pub mod write; mod rpath; pub mod wasm; @@ -131,12 +129,10 @@ mod type_; mod type_of; mod value; +#[derive(Clone)] pub struct LlvmCodegenBackend(()); impl ExtraBackendMethods for LlvmCodegenBackend { - type Metadata = ModuleLlvm; - type OngoingCodegen = OngoingCodegen; - fn thin_lto_available(&self) -> bool { unsafe { !llvm::LLVMRustThinLTOAvailable() } } @@ -153,42 +149,9 @@ impl ExtraBackendMethods for LlvmCodegenBackend { ) -> EncodedMetadata { base::write_metadata(tcx, metadata) } - fn start_async_codegen( - &self, - tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, - 
coordinator_receive: Receiver>, - total_cgus: usize - ) -> OngoingCodegen { - write::start_async_codegen(tcx, time_graph, metadata, coordinator_receive, total_cgus) - } - fn submit_pre_codegened_module_to_llvm( - &self, - codegen : &OngoingCodegen, - tcx: TyCtxt, - module: ModuleCodegen - ) { - codegen.submit_pre_codegened_module_to_llvm(tcx, module) - } - fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) { - write::submit_pre_lto_module_to_llvm(tcx, module) - } - fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) { - write::submit_post_lto_module_to_llvm(tcx, module) - } - fn codegen_finished(&self, codegen : &OngoingCodegen, tcx: TyCtxt) { - codegen.codegen_finished(tcx) - } - fn check_for_errors(&self, codegen: &OngoingCodegen, sess: &Session) { - codegen.check_for_errors(sess) - } fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { unsafe { allocator::codegen(tcx, mods, kind) } } - fn wait_for_signal_to_codegen_item(&self, codegen: &OngoingCodegen) { - codegen.wait_for_signal_to_codegen_item() - } fn compile_codegen_unit<'ll, 'tcx: 'll>( &self, tcx: TyCtxt<'ll, 'tcx, 'tcx>, @@ -196,11 +159,95 @@ impl ExtraBackendMethods for LlvmCodegenBackend { ) -> Stats { base::compile_codegen_unit(tcx, cgu_name) } + fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { + back::write::target_machine_factory(sess, find_features) + } + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { + llvm_util::target_cpu(sess) + } } +impl Clone for &'static mut llvm::TargetMachine { + fn clone(&self) -> Self { + // This method should never be called. It is put here because in + // rustc_codegen_ssa::back::write::CodegenContext, the TargetMachine is contained in a + // closure returned by a function under an Arc. 
The clone-deriving algorithm works when the + // struct contains the original LLVM TargetMachine type but not any more when supplied with + // a generic type. Hence this dummy Clone implementation. + panic!() + } +} + +impl WriteBackendMethods for LlvmCodegenBackend { + type Module = ModuleLlvm; + type ModuleBuffer = back::lto::ModuleBuffer; + type Context = llvm::Context; + type TargetMachine = &'static mut llvm::TargetMachine; + type ThinData = back::lto::ThinData; + type ThinBuffer = back::lto::ThinBuffer; + fn print_pass_timings(&self) { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec>, Vec), FatalError> { + back::lto::run(cgcx, modules, cached_modules, timeline) + } + fn new_diagnostic_handlers<'a>( + cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a Self::Context + ) -> DiagnosticHandlers<'a, Self> { + back::write::new_diagnostic_handlers(cgcx, handler, llcx) + } + fn drop_diagnostic_handlers<'a>(diag : &mut DiagnosticHandlers<'a, Self>) { + back::write::drop_diagnostic_handlers(diag) + } + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError> { + back::write::optimize(cgcx, diag_handler, module, config, timeline) + } + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline + ) -> Result, FatalError> { + back::lto::optimize_thin_module(thin, cgcx, timeline) + } + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result { + back::write::codegen(cgcx, diag_handler, module, config, timeline) + } + fn run_lto_pass_manager( + cgcx: &CodegenContext, + module: &ModuleCodegen, + config: &ModuleConfig, + thin: bool + ) { + 
back::lto::run_pass_manager(cgcx, module, config, thin) + } +} -impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis -impl !Sync for LlvmCodegenBackend {} +unsafe impl<'a> Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis +unsafe impl<'a> Sync for LlvmCodegenBackend {} impl LlvmCodegenBackend { pub fn new() -> Box { @@ -208,7 +255,7 @@ impl LlvmCodegenBackend { } } -impl CodegenBackend for LlvmCodegenBackend { +impl<'a> CodegenBackend for LlvmCodegenBackend { fn init(&self, sess: &Session) { llvm_util::init(sess); // Make sure llvm is inited } @@ -262,20 +309,20 @@ impl CodegenBackend for LlvmCodegenBackend { fn provide(&self, providers: &mut ty::query::Providers) { back::symbol_names::provide(providers); - back::symbol_export::provide(providers); + rustc_codegen_ssa::back::symbol_export::provide(providers); rustc_codegen_ssa::base::provide(providers); attributes::provide(providers); } fn provide_extern(&self, providers: &mut ty::query::Providers) { - back::symbol_export::provide_extern(providers); + rustc_codegen_ssa::back::symbol_export::provide_extern(providers); rustc_codegen_ssa::base::provide_extern(providers); attributes::provide_extern(providers); } - fn codegen_crate<'a, 'tcx>( + fn codegen_crate<'b, 'tcx>( &self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'b, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) @@ -290,11 +337,12 @@ impl CodegenBackend for LlvmCodegenBackend { ) -> Result<(), CompileIncomplete>{ use rustc::util::common::time; let (ongoing_codegen, work_products) = - ongoing_codegen.downcast::<::back::write::OngoingCodegen>() + ongoing_codegen.downcast:: + >() .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box") .join(sess); if sess.opts.debugging_opts.incremental_info { - back::write::dump_incremental_data(&ongoing_codegen); + rustc_codegen_ssa::back::write::dump_incremental_data(&ongoing_codegen); } time(sess, @@ -370,15 
+418,4 @@ impl Drop for ModuleLlvm { } } -struct CodegenResults { - crate_name: Symbol, - modules: Vec, - allocator_module: Option, - metadata_module: CompiledModule, - crate_hash: Svh, - metadata: rustc::middle::cstore::EncodedMetadata, - windows_subsystem: Option, - linker_info: back::linker::LinkerInfo, - crate_info: CrateInfo, -} __build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml index 16b00802cd1cb..6e2e0cb32f23a 100644 --- a/src/librustc_codegen_ssa/Cargo.toml +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -12,6 +12,11 @@ test = false [dependencies] bitflags = "1.0" log = "0.4" +num_cpus = "1.0" +rustc-demangle = "0.1.4" +memmap = "0.6" +jobserver = "0.1" +cc = "1.0.1" syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } @@ -19,6 +24,10 @@ rustc = { path = "../librustc" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_apfloat = { path = "../librustc_apfloat" } +rustc_allocator = { path = "../librustc_allocator" } +rustc_fs_util = { path = "../librustc_fs_util" } +rustc_errors = { path = "../librustc_errors" } rustc_mir = { path = "../librustc_mir" } rustc_codegen_utils = { path = "../librustc_codegen_utils" } rustc_incremental = { path = "../librustc_incremental" } +serialize = { path = "../libserialize" } diff --git a/src/librustc_codegen_ssa/back/archive.rs b/src/librustc_codegen_ssa/back/archive.rs new file mode 100644 index 0000000000000..b5e1deb0d5df3 --- /dev/null +++ b/src/librustc_codegen_ssa/back/archive.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::session::Session; + +use std::path::PathBuf; + +pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) + -> PathBuf { + // On Windows, static libraries sometimes show up as libfoo.a and other + // times show up as foo.lib + let oslibname = format!("{}{}{}", + sess.target.target.options.staticlib_prefix, + name, + sess.target.target.options.staticlib_suffix); + let unixlibname = format!("lib{}.a", name); + + for path in search_paths { + debug!("looking for {} inside {:?}", name, path); + let test = path.join(&oslibname); + if test.exists() { return test } + if oslibname != unixlibname { + let test = path.join(&unixlibname); + if test.exists() { return test } + } + } + sess.fatal(&format!("could not find native static library `{}`, \ + perhaps an -L flag is missing?", name)); +} diff --git a/src/librustc_codegen_llvm/back/command.rs b/src/librustc_codegen_ssa/back/command.rs similarity index 100% rename from src/librustc_codegen_llvm/back/command.rs rename to src/librustc_codegen_ssa/back/command.rs diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs new file mode 100644 index 0000000000000..4b9415fb0069a --- /dev/null +++ b/src/librustc_codegen_ssa/back/link.rs @@ -0,0 +1,213 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. 
+ +use rustc::session::{Session, config}; +use rustc::session::search_paths::PathKind; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::cstore::LibSource; +use rustc_target::spec::LinkerFlavor; +use rustc::hir::def_id::CrateNum; + +use super::command::Command; +use CrateInfo; + +use cc::windows_registry; +use std::fs; +use std::path::{Path, PathBuf}; +use std::env; + + +// The third parameter is for env vars, used on windows to set up the +// path for MSVC to find its DLLs, and gcc to find its bundled +// toolchain +pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { + let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); + + // If our linker looks like a batch script on Windows then to execute this + // we'll need to spawn `cmd` explicitly. This is primarily done to handle + // emscripten where the linker is `emcc.bat` and needs to be spawned as + // `cmd /c emcc.bat ...`. + // + // This worked historically but is needed manually since #42436 (regression + // was tagged as #42791) and some more info can be found on #44443 for + // emscripten itself. + let mut cmd = match linker.to_str() { + Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), + _ => match flavor { + LinkerFlavor::Lld(f) => Command::lld(linker, f), + LinkerFlavor::Msvc + if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => + { + Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) + }, + _ => Command::new(linker), + } + }; + + // The compiler's sysroot often has some bundled tools, so add it to the + // PATH for the child. 
+ let mut new_path = sess.host_filesearch(PathKind::All) + .get_tools_search_paths(); + let mut msvc_changed_path = false; + if sess.target.target.options.is_like_msvc { + if let Some(ref tool) = msvc_tool { + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + if k == "PATH" { + new_path.extend(env::split_paths(v)); + msvc_changed_path = true; + } else { + cmd.env(k, v); + } + } + } + } + + if !msvc_changed_path { + if let Some(path) = env::var_os("PATH") { + new_path.extend(env::split_paths(&path)); + } + } + cmd.env("PATH", env::join_paths(new_path).unwrap()); + + (linker.to_path_buf(), cmd) +} + +pub fn each_linked_rlib(sess: &Session, + info: &CrateInfo, + f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { + let crates = info.used_crates_static.iter(); + let fmts = sess.dependency_formats.borrow(); + let fmts = fmts.get(&config::CrateType::Executable) + .or_else(|| fmts.get(&config::CrateType::Staticlib)) + .or_else(|| fmts.get(&config::CrateType::Cdylib)) + .or_else(|| fmts.get(&config::CrateType::ProcMacro)); + let fmts = match fmts { + Some(f) => f, + None => return Err("could not find formats for rlibs".to_string()) + }; + for &(cnum, ref path) in crates { + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err("could not find formats for rlibs".to_string()) + } + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, + LibSource::MetadataOnly => { + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) + } + LibSource::None => { + return Err(format!("could not find rlib for: `{}`", name)) + } + }; + f(cnum, &path); + } + Ok(()) +} + + +/// Returns a boolean indicating whether the specified crate should be ignored +/// during LTO. 
+/// +/// Crates ignored during LTO are not lumped together in the "massive object +/// file" that we create and are linked in their normal rlib states. See +/// comments below for what crates do not participate in LTO. +/// +/// It's unusual for a crate to not participate in LTO. Typically only +/// compiler-specific and unstable crates have a reason to not participate in +/// LTO. +pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { + // If our target enables builtin function lowering in LLVM then the + // crates providing these functions don't participate in LTO (e.g. + // no_builtins or compiler builtins crates). + !sess.target.target.options.no_builtins && + (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) +} + +pub fn remove(sess: &Session, path: &Path) { + match fs::remove_file(path) { + Ok(..) => {} + Err(e) => { + sess.err(&format!("failed to remove {}: {}", + path.display(), + e)); + } + } +} + +pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { + fn infer_from( + sess: &Session, + linker: Option, + flavor: Option, + ) -> Option<(PathBuf, LinkerFlavor)> { + match (linker, flavor) { + (Some(linker), Some(flavor)) => Some((linker, flavor)), + // only the linker flavor is known; use the default linker for the selected flavor + (None, Some(flavor)) => Some((PathBuf::from(match flavor { + LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, + LinkerFlavor::Gcc => "cc", + LinkerFlavor::Ld => "ld", + LinkerFlavor::Msvc => "link.exe", + LinkerFlavor::Lld(_) => "lld", + }), flavor)), + (Some(linker), None) => { + let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { + sess.fatal("couldn't extract file stem from specified linker"); + }).to_owned(); + + let flavor = if stem == "emcc" { + LinkerFlavor::Em + } else if stem == "gcc" || stem.ends_with("-gcc") { + LinkerFlavor::Gcc + } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") 
{ + LinkerFlavor::Ld + } else if stem == "link" || stem == "lld-link" { + LinkerFlavor::Msvc + } else if stem == "lld" || stem == "rust-lld" { + LinkerFlavor::Lld(sess.target.target.options.lld_flavor) + } else { + // fall back to the value in the target spec + sess.target.target.linker_flavor + }; + + Some((linker, flavor)) + }, + (None, None) => None, + } + } + + // linker and linker flavor specified via command line have precedence over what the target + // specification specifies + if let Some(ret) = infer_from( + sess, + sess.opts.cg.linker.clone(), + sess.opts.debugging_opts.linker_flavor, + ) { + return ret; + } + + if let Some(ret) = infer_from( + sess, + sess.target.target.options.linker.clone().map(PathBuf::from), + Some(sess.target.target.linker_flavor), + ) { + return ret; + } + + bug!("Not enough information provided to determine how to invoke the linker"); +} diff --git a/src/librustc_codegen_llvm/back/linker.rs b/src/librustc_codegen_ssa/back/linker.rs similarity index 96% rename from src/librustc_codegen_llvm/back/linker.rs rename to src/librustc_codegen_ssa/back/linker.rs index e18c8b9dec463..34d0e5349600e 100644 --- a/src/librustc_codegen_llvm/back/linker.rs +++ b/src/librustc_codegen_ssa/back/linker.rs @@ -1,4 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,38 +8,41 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. 
+ +use super::symbol_export; +use super::command::Command; +use super::archive; +use interfaces::*; + +use rustc_target::spec::{LinkerFlavor, LldFlavor}; use rustc_data_structures::fx::FxHashMap; -use std::ffi::{OsStr, OsString}; +use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, CrossLangLto}; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::middle::dependency_format::Linkage; + use std::fs::{self, File}; +use std::ffi::{OsString, OsStr}; +use std::path::{Path, PathBuf}; use std::io::prelude::*; use std::io::{self, BufWriter}; -use std::path::{Path, PathBuf}; - -use back::archive; -use back::command::Command; -use back::symbol_export; -use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; -use rustc::middle::dependency_format::Linkage; -use rustc::session::Session; -use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, - CrossLangLto}; -use rustc::ty::TyCtxt; -use rustc_target::spec::{LinkerFlavor, LldFlavor}; use serialize::{json, Encoder}; -use llvm_util; -/// For all the linkers we support, and information they might -/// need out of the shared crate context before we get rid of it. -pub struct LinkerInfo { +pub struct LinkerInfo { exports: FxHashMap>, + backend: B } -impl LinkerInfo { - pub fn new(tcx: TyCtxt) -> LinkerInfo { +impl LinkerInfo { + pub fn new(tcx: TyCtxt, backend: B) -> LinkerInfo { LinkerInfo { exports: tcx.sess.crate_types.borrow().iter().map(|&c| { (c, exported_symbols(tcx, c)) }).collect(), + backend } } @@ -96,6 +99,7 @@ impl LinkerInfo { } } + /// Linker abstraction used by back::link to build up the command to invoke a /// linker. 
/// @@ -137,16 +141,249 @@ pub trait Linker { fn finalize(&mut self) -> Command; } -pub struct GccLinker<'a> { + +impl<'a, B : ExtraBackendMethods> Linker for MsvcLinker<'a, B> { + fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } + fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } + fn args(&mut self, args: &[String]) { self.cmd.args(args); } + + fn build_dylib(&mut self, out_filename: &Path) { + self.cmd.arg("/DLL"); + let mut arg: OsString = "/IMPLIB:".into(); + arg.push(out_filename.with_extension("dll.lib")); + self.cmd.arg(arg); + } + + fn build_static_executable(&mut self) { + // noop + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + // MSVC's ICF (Identical COMDAT Folding) link optimization is + // slow for Rust and thus we disable it by default when not in + // optimization build. + if self.sess.opts.optimize != config::OptLevel::No { + self.cmd.arg("/OPT:REF,ICF"); + } else { + // It is necessary to specify NOICF here, because /OPT:REF + // implies ICF by default. + self.cmd.arg("/OPT:REF,NOICF"); + } + } + + fn link_dylib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn link_rust_dylib(&mut self, lib: &str, path: &Path) { + // When producing a dll, the MSVC linker may not actually emit a + // `foo.lib` file if the dll doesn't actually export any symbols, so we + // check to see if the file is there and just omit linking to it if it's + // not present. 
+ let name = format!("{}.dll.lib", lib); + if fs::metadata(&path.join(&name)).is_ok() { + self.cmd.arg(name); + } + } + + fn link_staticlib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn position_independent_executable(&mut self) { + // noop + } + + fn no_position_independent_executable(&mut self) { + // noop + } + + fn full_relro(&mut self) { + // noop + } + + fn partial_relro(&mut self) { + // noop + } + + fn no_relro(&mut self) { + // noop + } + + fn no_default_libraries(&mut self) { + // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC + // as there's been trouble in the past of linking the C++ standard + // library required by LLVM. This likely needs to happen one day, but + // in general Windows is also a more controlled environment than + // Unix, so it's not necessarily as critical that this be implemented. + // + // Note that there are also some licensing worries about statically + // linking some libraries which require a specific agreement, so it may + // not ever be possible for us to pass this flag. + } + + fn include_path(&mut self, path: &Path) { + let mut arg = OsString::from("/LIBPATH:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn output_filename(&mut self, path: &Path) { + let mut arg = OsString::from("/OUT:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn framework_path(&mut self, _path: &Path) { + bug!("frameworks are not supported on windows") + } + fn link_framework(&mut self, _framework: &str) { + bug!("frameworks are not supported on windows") + } + + fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { + // not supported? + self.link_staticlib(lib); + } + fn link_whole_rlib(&mut self, path: &Path) { + // not supported? + self.link_rlib(path); + } + fn optimize(&mut self) { + // Needs more investigation of `/OPT` arguments + } + + fn pgo_gen(&mut self) { + // Nothing needed here. 
+ } + + fn debuginfo(&mut self) { + // This will cause the Microsoft linker to generate a PDB file + // from the CodeView line tables in the object files. + self.cmd.arg("/DEBUG"); + + // This will cause the Microsoft linker to embed .natvis info into the the PDB file + let sysroot = self.sess.sysroot(); + let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); + if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { + // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes + // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore + // them, eventually mooting this workaround, per this landed patch: + // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 + if let Some(ref linker_path) = self.sess.opts.cg.linker { + if let Some(linker_name) = Path::new(&linker_path).file_stem() { + if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { + self.sess.warn("not embedding natvis: lld-link may not support the flag"); + return; + } + } + } + for entry in natvis_dir { + match entry { + Ok(entry) => { + let path = entry.path(); + if path.extension() == Some("natvis".as_ref()) { + let mut arg = OsString::from("/NATVIS:"); + arg.push(path); + self.cmd.arg(arg); + } + }, + Err(err) => { + self.sess.warn(&format!("error enumerating natvis directory: {}", err)); + }, + } + } + } + } + + // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to + // export symbols from a dynamic library. When building a dynamic library, + // however, we're going to want some symbols exported, so this function + // generates a DEF file which lists all the symbols. + // + // The linker will read this `*.def` file and export all the symbols from + // the dynamic library. Note that this is not as simple as just exporting + // all the symbols in the current crate (as specified by `codegen.reachable`) + // but rather we also need to possibly export the symbols of upstream + // crates. 
Upstream rlibs may be linked statically to this dynamic library, + // in which case they may continue to transitively be used and hence need + // their symbols exported. + fn export_symbols(&mut self, + tmpdir: &Path, + crate_type: CrateType) { + let path = tmpdir.join("lib.def"); + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + + // Start off with the standard module name header and then go + // straight to exports. + writeln!(f, "LIBRARY")?; + writeln!(f, "EXPORTS")?; + for symbol in self.info.exports[&crate_type].iter() { + debug!(" _{}", symbol); + writeln!(f, " {}", symbol)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + let mut arg = OsString::from("/DEF:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn subsystem(&mut self, subsystem: &str) { + // Note that previous passes of the compiler validated this subsystem, + // so we just blindly pass it to the linker. + self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); + + // Windows has two subsystems we're interested in right now, the console + // and windows subsystems. These both implicitly have different entry + // points (starting symbols). The console entry point starts with + // `mainCRTStartup` and the windows entry point starts with + // `WinMainCRTStartup`. These entry points, defined in system libraries, + // will then later probe for either `main` or `WinMain`, respectively to + // start the application. + // + // In Rust we just always generate a `main` function so we want control + // to always start there, so we force the entry point on the windows + // subsystem to be `mainCRTStartup` to get everything booted up + // correctly. 
+ // + // For more information see RFC #1665 + if subsystem == "windows" { + self.cmd.arg("/ENTRY:mainCRTStartup"); + } + } + + fn finalize(&mut self) -> Command { + let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + // MSVC doesn't need group indicators + fn group_start(&mut self) {} + fn group_end(&mut self) {} + + fn cross_lang_lto(&mut self) { + // Do nothing + } +} + + + +pub struct GccLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo, + info: &'a LinkerInfo, hinted_static: bool, // Keeps track of the current hinting mode. // Link as ld is_ld: bool, } -impl<'a> GccLinker<'a> { +impl<'a, B : ExtraBackendMethods> GccLinker<'a, B> { /// Argument that must be passed *directly* to the linker /// /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used @@ -204,7 +441,7 @@ impl<'a> GccLinker<'a> { }; self.linker_arg(&format!("-plugin-opt={}", opt_level)); - self.linker_arg(&format!("-plugin-opt=mcpu={}", llvm_util::target_cpu(self.sess))); + self.linker_arg(&format!("-plugin-opt=mcpu={}", self.info.backend.target_cpu(self.sess))); match self.sess.lto() { config::Lto::Thin | @@ -219,7 +456,7 @@ impl<'a> GccLinker<'a> { } } -impl<'a> Linker for GccLinker<'a> { +impl<'a, B : ExtraBackendMethods> Linker for GccLinker<'a, B> { fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); } fn link_staticlib(&mut self, lib: &str) { self.hint_static(); self.cmd.arg(format!("-l{}",lib)); @@ -489,249 +726,19 @@ impl<'a> Linker for GccLinker<'a> { } } -pub struct MsvcLinker<'a> { +pub struct MsvcLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo + info: &'a LinkerInfo } -impl<'a> Linker for MsvcLinker<'a> { - fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } - fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } - fn args(&mut self, args: &[String]) { 
self.cmd.args(args); } - - fn build_dylib(&mut self, out_filename: &Path) { - self.cmd.arg("/DLL"); - let mut arg: OsString = "/IMPLIB:".into(); - arg.push(out_filename.with_extension("dll.lib")); - self.cmd.arg(arg); - } - - fn build_static_executable(&mut self) { - // noop - } - - fn gc_sections(&mut self, _keep_metadata: bool) { - // MSVC's ICF (Identical COMDAT Folding) link optimization is - // slow for Rust and thus we disable it by default when not in - // optimization build. - if self.sess.opts.optimize != config::OptLevel::No { - self.cmd.arg("/OPT:REF,ICF"); - } else { - // It is necessary to specify NOICF here, because /OPT:REF - // implies ICF by default. - self.cmd.arg("/OPT:REF,NOICF"); - } - } - - fn link_dylib(&mut self, lib: &str) { - self.cmd.arg(&format!("{}.lib", lib)); - } - - fn link_rust_dylib(&mut self, lib: &str, path: &Path) { - // When producing a dll, the MSVC linker may not actually emit a - // `foo.lib` file if the dll doesn't actually export any symbols, so we - // check to see if the file is there and just omit linking to it if it's - // not present. - let name = format!("{}.dll.lib", lib); - if fs::metadata(&path.join(&name)).is_ok() { - self.cmd.arg(name); - } - } - - fn link_staticlib(&mut self, lib: &str) { - self.cmd.arg(&format!("{}.lib", lib)); - } - - fn position_independent_executable(&mut self) { - // noop - } - - fn no_position_independent_executable(&mut self) { - // noop - } - - fn full_relro(&mut self) { - // noop - } - - fn partial_relro(&mut self) { - // noop - } - - fn no_relro(&mut self) { - // noop - } - - fn no_default_libraries(&mut self) { - // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC - // as there's been trouble in the past of linking the C++ standard - // library required by LLVM. This likely needs to happen one day, but - // in general Windows is also a more controlled environment than - // Unix, so it's not necessarily as critical that this be implemented. 
- // - // Note that there are also some licensing worries about statically - // linking some libraries which require a specific agreement, so it may - // not ever be possible for us to pass this flag. - } - - fn include_path(&mut self, path: &Path) { - let mut arg = OsString::from("/LIBPATH:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn output_filename(&mut self, path: &Path) { - let mut arg = OsString::from("/OUT:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn framework_path(&mut self, _path: &Path) { - bug!("frameworks are not supported on windows") - } - fn link_framework(&mut self, _framework: &str) { - bug!("frameworks are not supported on windows") - } - - fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { - // not supported? - self.link_staticlib(lib); - } - fn link_whole_rlib(&mut self, path: &Path) { - // not supported? - self.link_rlib(path); - } - fn optimize(&mut self) { - // Needs more investigation of `/OPT` arguments - } - - fn pgo_gen(&mut self) { - // Nothing needed here. - } - - fn debuginfo(&mut self) { - // This will cause the Microsoft linker to generate a PDB file - // from the CodeView line tables in the object files. - self.cmd.arg("/DEBUG"); - - // This will cause the Microsoft linker to embed .natvis info into the the PDB file - let sysroot = self.sess.sysroot(); - let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); - if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { - // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes - // on, the /NATVIS:... flags. 
LLVM 6 (or earlier) should at worst ignore - // them, eventually mooting this workaround, per this landed patch: - // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 - if let Some(ref linker_path) = self.sess.opts.cg.linker { - if let Some(linker_name) = Path::new(&linker_path).file_stem() { - if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { - self.sess.warn("not embedding natvis: lld-link may not support the flag"); - return; - } - } - } - for entry in natvis_dir { - match entry { - Ok(entry) => { - let path = entry.path(); - if path.extension() == Some("natvis".as_ref()) { - let mut arg = OsString::from("/NATVIS:"); - arg.push(path); - self.cmd.arg(arg); - } - }, - Err(err) => { - self.sess.warn(&format!("error enumerating natvis directory: {}", err)); - }, - } - } - } - } - - // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to - // export symbols from a dynamic library. When building a dynamic library, - // however, we're going to want some symbols exported, so this function - // generates a DEF file which lists all the symbols. - // - // The linker will read this `*.def` file and export all the symbols from - // the dynamic library. Note that this is not as simple as just exporting - // all the symbols in the current crate (as specified by `codegen.reachable`) - // but rather we also need to possibly export the symbols of upstream - // crates. Upstream rlibs may be linked statically to this dynamic library, - // in which case they may continue to transitively be used and hence need - // their symbols exported. - fn export_symbols(&mut self, - tmpdir: &Path, - crate_type: CrateType) { - let path = tmpdir.join("lib.def"); - let res = (|| -> io::Result<()> { - let mut f = BufWriter::new(File::create(&path)?); - - // Start off with the standard module name header and then go - // straight to exports. 
- writeln!(f, "LIBRARY")?; - writeln!(f, "EXPORTS")?; - for symbol in self.info.exports[&crate_type].iter() { - debug!(" _{}", symbol); - writeln!(f, " {}", symbol)?; - } - Ok(()) - })(); - if let Err(e) = res { - self.sess.fatal(&format!("failed to write lib.def file: {}", e)); - } - let mut arg = OsString::from("/DEF:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn subsystem(&mut self, subsystem: &str) { - // Note that previous passes of the compiler validated this subsystem, - // so we just blindly pass it to the linker. - self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); - - // Windows has two subsystems we're interested in right now, the console - // and windows subsystems. These both implicitly have different entry - // points (starting symbols). The console entry point starts with - // `mainCRTStartup` and the windows entry point starts with - // `WinMainCRTStartup`. These entry points, defined in system libraries, - // will then later probe for either `main` or `WinMain`, respectively to - // start the application. - // - // In Rust we just always generate a `main` function so we want control - // to always start there, so we force the entry point on the windows - // subsystem to be `mainCRTStartup` to get everything booted up - // correctly. 
- // - // For more information see RFC #1665 - if subsystem == "windows" { - self.cmd.arg("/ENTRY:mainCRTStartup"); - } - } - - fn finalize(&mut self) -> Command { - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd - } - - // MSVC doesn't need group indicators - fn group_start(&mut self) {} - fn group_end(&mut self) {} - - fn cross_lang_lto(&mut self) { - // Do nothing - } -} - -pub struct EmLinker<'a> { +pub struct EmLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo + info: &'a LinkerInfo } -impl<'a> Linker for EmLinker<'a> { +impl<'a, B : ExtraBackendMethods> Linker for EmLinker<'a, B> { fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); } @@ -895,42 +902,13 @@ impl<'a> Linker for EmLinker<'a> { } } -fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { - let mut symbols = Vec::new(); - - let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); - for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - - let formats = tcx.sess.dependency_formats.borrow(); - let deps = formats[&crate_type].iter(); - - for (index, dep_format) in deps.enumerate() { - let cnum = CrateNum::new(index + 1); - // For each dependency that we are linking to statically ... - if *dep_format == Linkage::Static { - // ... we add its symbol list to our export list. 
- for &(symbol, level) in tcx.exported_symbols(cnum).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - } - } - - symbols -} - -pub struct WasmLd<'a> { +pub struct WasmLd<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo, + info: &'a LinkerInfo, } -impl<'a> Linker for WasmLd<'a> { +impl<'a, B : ExtraBackendMethods> Linker for WasmLd<'a, B> { fn link_dylib(&mut self, lib: &str) { self.cmd.arg("-l").arg(lib); } @@ -1093,3 +1071,33 @@ impl<'a> Linker for WasmLd<'a> { // Do nothing for now } } + + +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { + let mut symbols = Vec::new(); + + let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); + for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + + let formats = tcx.sess.dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + + for (index, dep_format) in deps.enumerate() { + let cnum = CrateNum::new(index + 1); + // For each dependency that we are linking to statically ... + if *dep_format == Linkage::Static { + // ... we add its symbol list to our export list. + for &(symbol, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + } + } + + symbols +} diff --git a/src/librustc_codegen_ssa/back/lto.rs b/src/librustc_codegen_ssa/back/lto.rs new file mode 100644 index 0000000000000..944b98e84499e --- /dev/null +++ b/src/librustc_codegen_ssa/back/lto.rs @@ -0,0 +1,124 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use super::write::CodegenContext; +use interfaces::*; +use ModuleCodegen; + +use rustc::util::time_graph::Timeline; +use rustc_errors::FatalError; + +use std::sync::Arc; +use std::ffi::CString; + +pub struct ThinModule { + pub shared: Arc>, + pub idx: usize, +} + +impl ThinModule { + pub fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + pub fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. + self.data().len() as u64 + } + + pub fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } + +} + +pub struct ThinShared { + pub data: B::ThinData, + pub thin_buffers: Vec, + pub serialized_modules: Vec>, + pub module_names: Vec, +} + + +pub enum LtoModuleCodegen { + Fat { + module: Option>, + _serialized_bitcode: Vec>, + }, + + Thin(ThinModule), +} + +impl LtoModuleCodegen +{ + pub fn name(&self) -> &str { + match *self { + LtoModuleCodegen::Fat { .. } => "everything", + LtoModuleCodegen::Thin(ref m) => m.name(), + } + } + + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleCodegen` still + /// points to LLVM data structures owned by this `LtoModuleCodegen`. + /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. + pub unsafe fn optimize( + &mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline + ) -> Result, FatalError> { + match *self { + LtoModuleCodegen::Fat { ref mut module, .. 
} => { + let module = module.take().unwrap(); + { + let config = cgcx.config(module.kind); + B::run_lto_pass_manager(cgcx, &module, config, false); + timeline.record("fat-done"); + } + Ok(module) + } + LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin, timeline), + } + } + + /// A "gauge" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleCodegen::Fat { .. } => 0, + LtoModuleCodegen::Thin(ref m) => m.cost(), + } + } +} + + +pub enum SerializedModule { + Local(M), + FromRlib(Vec), + FromUncompressedFile(memmap::Mmap), +} + +impl SerializedModule { + pub fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + SerializedModule::FromUncompressedFile(ref m) => m, + } + } +} diff --git a/src/librustc_codegen_ssa/back/mod.rs b/src/librustc_codegen_ssa/back/mod.rs new file mode 100644 index 0000000000000..3d7ead74d1c5d --- /dev/null +++ b/src/librustc_codegen_ssa/back/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub mod write; +pub mod linker; +pub mod lto; +pub mod link; +pub mod command; +pub mod symbol_export; +pub mod archive; diff --git a/src/librustc_codegen_llvm/back/symbol_export.rs b/src/librustc_codegen_ssa/back/symbol_export.rs similarity index 99% rename from src/librustc_codegen_llvm/back/symbol_export.rs rename to src/librustc_codegen_ssa/back/symbol_export.rs index 6b1b0b94fd9d7..a18844f636ee9 100644 --- a/src/librustc_codegen_llvm/back/symbol_export.rs +++ b/src/librustc_codegen_ssa/back/symbol_export.rs @@ -11,7 +11,7 @@ use rustc_data_structures::sync::Lrc; use std::sync::Arc; -use monomorphize::Instance; +use rustc_mir::monomorphize::Instance; use rustc::hir; use rustc::hir::Node; use rustc::hir::CodegenFnAttrFlags; diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs new file mode 100644 index 0000000000000..af6f88dd00c6a --- /dev/null +++ b/src/librustc_codegen_ssa/back/write.rs @@ -0,0 +1,1826 @@ +// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, CodegenResults, + RLIB_BYTECODE_EXTENSION}; +use super::linker::LinkerInfo; +use super::lto::{self, SerializedModule}; +use super::link::{self, remove, get_linker}; +use super::command::Command; +use super::symbol_export::ExportedSymbols; + +use memmap; +use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, + in_incr_comp_dir, in_incr_comp_dir_sess}; +use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; +use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; +use rustc::middle::cstore::EncodedMetadata; +use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::Session; +use rustc::util::nodemap::FxHashMap; +use rustc::util::time_graph::{self, TimeGraph, Timeline}; +use interfaces::*; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry}; +use rustc_fs_util::link_or_copy; +use rustc_data_structures::svh::Svh; +use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; +use rustc_errors::emitter::{Emitter}; +use syntax::attr; +use syntax::ext::hygiene::Mark; +use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; +use jobserver::{Client, Acquired}; + +use std::any::Any; +use std::fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; +use std::time::Instant; +use std::thread; + +const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; + +/// Module-specific configuration for `optimize_and_codegen`. +pub struct ModuleConfig { + /// Names of additional optimization passes to run. + pub passes: Vec, + /// Some(level) to optimize at a certain level, or None to run + /// absolutely no optimizations (used for the metadata module). 
+ pub opt_level: Option, + + /// Some(level) to optimize binary size, or None to not affect program size. + pub opt_size: Option, + + pub pgo_gen: Option, + pub pgo_use: String, + + // Flags indicating which outputs to produce. + pub emit_pre_thin_lto_bc: bool, + pub emit_no_opt_bc: bool, + pub emit_bc: bool, + pub emit_bc_compressed: bool, + pub emit_lto_bc: bool, + pub emit_ir: bool, + pub emit_asm: bool, + pub emit_obj: bool, + // Miscellaneous flags. These are mostly copied from command-line + // options. + pub verify_llvm_ir: bool, + pub no_prepopulate_passes: bool, + pub no_builtins: bool, + pub time_passes: bool, + pub vectorize_loop: bool, + pub vectorize_slp: bool, + pub merge_functions: bool, + pub inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. + pub obj_is_bitcode: bool, + pub no_integrated_as: bool, + pub embed_bitcode: bool, + pub embed_bitcode_marker: bool, +} + +impl ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { + ModuleConfig { + passes, + opt_level: None, + opt_size: None, + + pgo_gen: None, + pgo_use: String::new(), + + emit_no_opt_bc: false, + emit_pre_thin_lto_bc: false, + emit_bc: false, + emit_bc_compressed: false, + emit_lto_bc: false, + emit_ir: false, + emit_asm: false, + emit_obj: false, + obj_is_bitcode: false, + embed_bitcode: false, + embed_bitcode_marker: false, + no_integrated_as: false, + + verify_llvm_ir: false, + no_prepopulate_passes: false, + no_builtins: false, + time_passes: false, + vectorize_loop: false, + vectorize_slp: false, + merge_functions: false, + inline_threshold: None + } + } + + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { + self.verify_llvm_ir = sess.verify_llvm_ir(); + self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; + self.no_builtins = no_builtins || sess.target.target.options.no_builtins; + 
self.time_passes = sess.time_passes(); + self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || + sess.opts.debugging_opts.cross_lang_lto.enabled(); + let embed_bitcode = sess.target.target.options.embed_bitcode || + sess.opts.debugging_opts.embed_bitcode; + if embed_bitcode { + match sess.opts.optimize { + config::OptLevel::No | + config::OptLevel::Less => { + self.embed_bitcode_marker = embed_bitcode; + } + _ => self.embed_bitcode = embed_bitcode, + } + } + + // Copy what clang does by turning on loop vectorization at O2 and + // slp vectorization at O3. Otherwise configure other optimization aspects + // of this pass manager builder. + // Turn off vectorization for emscripten, as it's not very well supported. + self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && + (sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive) && + !sess.target.target.options.is_like_emscripten; + + self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && + sess.opts.optimize == config::OptLevel::Aggressive && + !sess.target.target.options.is_like_emscripten; + + self.merge_functions = sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive; + } +} + +/// Assembler name and command used by codegen when no_integrated_as is enabled +pub struct AssemblerCommand { + name: PathBuf, + cmd: Command, +} + +/// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] +pub struct CodegenContext { + // Resources needed when running LTO + pub backend: B, + pub time_passes: bool, + pub lto: Lto, + pub no_landing_pads: bool, + pub save_temps: bool, + pub fewer_names: bool, + pub exported_symbols: Option>, + pub opts: Arc, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + pub output_filenames: Arc, + pub regular_module_config: Arc, + pub metadata_module_config: Arc, + 
pub allocator_module_config: Arc, + pub tm_factory: Arc Result + Send + Sync>, + pub msvc_imps_needed: bool, + pub target_pointer_width: String, + pub debuginfo: config::DebugInfo, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, + // Handler to use for diagnostics produced during codegen. + pub diag_emitter: SharedEmitter, + // LLVM passes added by plugins. + pub plugin_passes: Vec, + // LLVM optimizations for which we want to print remarks. + pub remark: Passes, + // Worker thread number + pub worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + pub incr_comp_session_dir: Option, + // Used to update CGU re-use information during the thinlto phase. + pub cgu_reuse_tracker: CguReuseTracker, + // Channel back to the main control thread to send messages to + pub coordinator_send: Sender>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. 
+ pub time_graph: Option, + // The assembler command if no_integrated_as option is enabled, None otherwise + pub assembler_cmd: Option> +} + +impl CodegenContext { + pub fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } + + pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } + +} + +pub struct DiagnosticHandlers<'a, B : WriteBackendMethods> { + pub data: *mut (&'a CodegenContext, &'a Handler), + pub llcx: &'a B::Context, +} + +impl<'a, B : WriteBackendMethods> Drop for DiagnosticHandlers<'a, B> { + fn drop(&mut self) { + B::drop_diagnostic_handlers(self); + } +} + +fn generate_lto_work( + cgcx: &CodegenContext, + modules: Vec>, + import_only_modules: Vec<(SerializedModule, WorkProduct)> +) -> Vec<(WorkItem, u64)> { + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let (lto_modules, copy_jobs) = B::run_lto(cgcx, modules, import_only_modules, &mut timeline) + .unwrap_or_else(|e| e.raise()); + + let lto_modules = lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }); + + let copy_jobs = copy_jobs.into_iter().map(|wp| { + (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { + name: wp.cgu_name.clone(), + source: wp, + }), 0) + }); + + lto_modules.chain(copy_jobs).collect() +} + +pub struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, +} + +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateType::Rlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + +fn need_pre_thin_lto_bitcode_for_incr_comp(sess: 
&Session) -> bool { + if sess.opts.incremental.is_none() { + return false + } + + match sess.lto() { + Lto::Fat | + Lto::No => false, + Lto::Thin | + Lto::ThinLocal => true, + } +} + +pub fn start_async_codegen( + backend: B, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize +) -> OngoingCodegen { + let sess = tcx.sess; + let crate_name = tcx.crate_name(LOCAL_CRATE); + let crate_hash = tcx.crate_hash(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let linker_info = LinkerInfo::new(tcx, backend.clone()); + let crate_info = CrateInfo::new(tcx); + + // Figure out what we actually need to build. 
+ let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); + + if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + modules_config.passes.push("asan".to_owned()); + modules_config.passes.push("asan-module".to_owned()); + } + Sanitizer::Memory => { + modules_config.passes.push("msan".to_owned()) + } + Sanitizer::Thread => { + modules_config.passes.push("tsan".to_owned()) + } + _ => {} + } + } + + if sess.opts.debugging_opts.profile { + modules_config.passes.push("insert-gcov-profiling".to_owned()) + } + + modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); + modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); + + modules_config.opt_level = Some(sess.opts.optimize); + modules_config.opt_size = Some(sess.opts.optimize); + + // Save all versions of the bytecode if we're saving our temporaries. + if sess.opts.cg.save_temps { + modules_config.emit_no_opt_bc = true; + modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_bc = true; + modules_config.emit_lto_bc = true; + metadata_config.emit_bc = true; + allocator_config.emit_bc = true; + } + + // Emit compressed bitcode files for the crate if we're emitting an rlib. + // Whenever an rlib is created, the bitcode is inserted into the archive in + // order to allow LTO against it. 
+ if need_crate_bitcode_for_rlib(sess) { + modules_config.emit_bc_compressed = true; + allocator_config.emit_bc_compressed = true; + } + + modules_config.emit_pre_thin_lto_bc = + need_pre_thin_lto_bitcode_for_incr_comp(sess); + + modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + tcx.sess.target.target.options.no_integrated_as; + + for output_type in sess.opts.output_types.keys() { + match *output_type { + OutputType::Bitcode => { modules_config.emit_bc = true; } + OutputType::LlvmAssembly => { modules_config.emit_ir = true; } + OutputType::Assembly => { + modules_config.emit_asm = true; + // If we're not using the LLVM assembler, this function + // could be invoked specially with output_type_assembly, so + // in this case we still want the metadata object file. + if !sess.opts.output_types.contains_key(&OutputType::Assembly) { + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + } + } + OutputType::Object => { modules_config.emit_obj = true; } + OutputType::Metadata => { metadata_config.emit_obj = true; } + OutputType::Exe => { + modules_config.emit_obj = true; + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + }, + OutputType::Mir => {} + OutputType::DepInfo => {} + } + } + + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); + + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. 
+ metadata_config.time_passes = false; + allocator_config.time_passes = false; + + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (codegen_worker_send, codegen_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(backend.clone(), + tcx, + &crate_info, + shared_emitter, + codegen_worker_send, + coordinator_receive, + total_cgus, + sess.jobserver.clone(), + time_graph.clone(), + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + + OngoingCodegen { + backend, + crate_name, + crate_hash, + metadata, + windows_subsystem, + linker_info, + crate_info, + + time_graph, + coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), + codegen_worker_receive, + shared_emitter_main, + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + } +} + +fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( + sess: &Session, + compiled_modules: &CompiledModules, +) -> FxHashMap { + let mut work_products = FxHashMap::default(); + + if sess.opts.incremental.is_none() { + return work_products; + } + + for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { + let mut files = vec![]; + + if let Some(ref path) = module.object { + files.push((WorkProductFileKind::Object, path.clone())); + } + if let Some(ref path) = module.bytecode { + files.push((WorkProductFileKind::Bytecode, path.clone())); + } + if let Some(ref path) = module.bytecode_compressed { + files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); + } + + if let Some((id, product)) = + copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { + work_products.insert(id, product); + } + } + + work_products +} + +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; + + // Produce final compile outputs. 
+ let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. + let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } + } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. 
There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } + } + + // Clean up unwanted temporary files. + + // We create the following files by default: + // - #crate#.#module-name#.bc + // - #crate#.#module-name#.o + // - #crate#.crate.metadata.bc + // - #crate#.crate.metadata.o + // - #crate#.o (linked from crate.##.o) + // - #crate#.bc (copied from crate.##.bc) + // We may create additional files if requested by the user (through + // `-C save-temps` or `--emit=` flags). + + if !sess.opts.cg.save_temps { + // Remove the temporary .#module-name#.o objects. If the user didn't + // explicitly request bitcode (with --emit=bc), and the bitcode is not + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. + + // Specific rules for keeping .#module-name#.bc: + // - If the user requested bitcode (`user_wants_bitcode`), and + // codegen_units > 1, then keep it. + // - If the user requested bitcode but codegen_units == 1, then we + // can toss .#module-name#.bc because we copied it to .bc earlier. + // - If we're not building an rlib and the user didn't request + // bitcode, then delete .#module-name#.bc. + // If you change how this works, also update back::link::link_rlib, + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. 
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + + let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; + + let keep_numbered_objects = needs_crate_object || + (user_wants_objects && sess.codegen_units() > 1); + + for module in compiled_modules.modules.iter() { + if let Some(ref path) = module.object { + if !keep_numbered_objects { + remove(sess, path); + } + } + + if let Some(ref path) = module.bytecode { + if !keep_numbered_bitcode { + remove(sess, path); + } + } + } + + if !user_wants_bitcode { + if let Some(ref path) = compiled_modules.metadata_module.bytecode { + remove(sess, &path); + } + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if let Some(ref path) = allocator_module.bytecode { + remove(sess, path); + } + } + } + } + + // We leave the following files around by default: + // - #crate#.o + // - #crate#.crate.metadata.o + // - #crate#.bc + // These are used in linking steps and will be cleaned up afterward. +} + +pub fn dump_incremental_data( + _codegen_results: &CodegenResults +) { + // FIXME(mw): This does not work at the moment because the situation has + // become more complicated due to incremental LTO. Now a CGU + // can have more than two caching states. + // println!("[incremental] Re-using {} out of {} modules", + // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), + // codegen_results.modules.len()); +} + +pub enum WorkItem { + /// Optimize a newly codegened, totally unoptimized module. + Optimize(ModuleCodegen), + /// Copy the post-LTO artifacts from the incremental cache to the output + /// directory. + CopyPostLtoArtifacts(CachedModuleCodegen), + /// Perform (Thin)LTO on the given module. 
+ LTO(lto::LtoModuleCodegen), +} + +impl WorkItem { + pub fn module_kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::CopyPostLtoArtifacts(_) | + WorkItem::LTO(_) => ModuleKind::Regular, + } + } + + pub fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } + } +} + +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleCodegen), +} + +fn execute_work_item( + cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline +) -> Result, FatalError> { + let module_config = cgcx.config(work_item.module_kind()); + + match work_item { + WorkItem::Optimize(module) => { + execute_optimize_work_item(cgcx, module, module_config, timeline) + } + WorkItem::CopyPostLtoArtifacts(module) => { + execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) + } + WorkItem::LTO(module) => { + execute_lto_work_item(cgcx, module, module_config, timeline) + } + } +} + +fn execute_optimize_work_item( + cgcx: &CodegenContext, + module: ModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + B::optimize(cgcx, &diag_handler, &module, module_config, timeline)?; + } + + let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + + // After we've done the initial round of optimizations we need to + // decide whether to synchronously codegen this module or ship it + // back to the coordinator thread for further LTO processing (which + // has to wait for all the initial modules to be optimized). + // + // Here we dispatch based on the `cgcx.lto` and kind of module we're + // codegenning... + let needs_lto = match cgcx.lto { + Lto::No => false, + + // If the linker does LTO, we don't have to do it. 
Note that we + // keep doing full LTO, if it is requested, as not to break the + // assumption that the output will be a single module. + Lto::Thin | Lto::ThinLocal if linker_does_lto => false, + + // Here we've got a full crate graph LTO requested. We ignore + // this, however, if the crate type is only an rlib as there's + // no full crate graph to process, that'll happen later. + // + // This use case currently comes up primarily for targets that + // require LTO so the request for LTO is always unconditionally + // passed down to the backend, but we don't actually want to do + // anything about it yet until we've got a final product. + Lto::Fat | Lto::Thin => { + cgcx.crate_types.len() != 1 || + cgcx.crate_types[0] != config::CrateType::Rlib + } + + // When we're automatically doing ThinLTO for multi-codegen-unit + // builds we don't actually want to LTO the allocator modules if + // it shows up. This is due to various linker shenanigans that + // we'll encounter later. + // + // Additionally here's where we also factor in the current LLVM + // version. If it doesn't support ThinLTO we skip this. + Lto::ThinLocal => { + module.kind != ModuleKind::Allocator && + cgcx.backend.thin_lto_available() + } + }; + + // Metadata modules never participate in LTO regardless of the lto + // settings. + let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; + + if needs_lto { + Ok(WorkItemResult::NeedsLTO(module)) + } else { + let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config, timeline)? 
}; + Ok(WorkItemResult::Compiled(module)) + } +} + +fn execute_copy_from_cache_work_item( + cgcx: &CodegenContext, + module: CachedModuleCodegen, + module_config: &ModuleConfig, + _: &mut Timeline +) -> Result, FatalError> { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let mut object = None; + let mut bytecode = None; + let mut bytecode_compressed = None; + for (kind, saved_file) in &module.source.saved_files { + let obj_out = match kind { + WorkProductFileKind::Object => { + let path = cgcx.output_filenames.temp_path(OutputType::Object, + Some(&module.name)); + object = Some(path.clone()); + path + } + WorkProductFileKind::Bytecode => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)); + bytecode = Some(path.clone()); + path + } + WorkProductFileKind::BytecodeCompressed => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)) + .with_extension(RLIB_BYTECODE_EXTENSION); + bytecode_compressed = Some(path.clone()); + path + } + }; + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + module.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + let diag_handler = cgcx.create_diag_handler(); + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + } + + assert_eq!(object.is_some(), module_config.emit_obj); + assert_eq!(bytecode.is_some(), module_config.emit_bc); + assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); + + Ok(WorkItemResult::Compiled(CompiledModule { + name: module.name, + kind: ModuleKind::Regular, + object, + bytecode, + bytecode_compressed, + })) +} + +fn execute_lto_work_item( + cgcx: &CodegenContext, + mut module: lto::LtoModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) 
-> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + let module = module.optimize(cgcx, timeline)?; + let module = B::codegen(cgcx, &diag_handler, module, module_config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } +} + +pub enum Message { + Token(io::Result), + NeedsLTO { + result: ModuleCodegen, + worker_id: usize, + }, + Done { + result: Result, + worker_id: usize, + }, + CodegenDone { + llvm_work_item: WorkItem, + cost: u64, + }, + AddImportOnlyModule { + module_data: SerializedModule, + work_product: WorkProduct, + }, + CodegenComplete, + CodegenItem, +} + +struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Codegenning, + LLVMing, +} + +fn start_executing_work( + backend: B, + tcx: TyCtxt, + crate_info: &CrateInfo, + shared_emitter: SharedEmitter, + codegen_worker_send: Sender>, + coordinator_receive: Receiver>, + total_cgus: usize, + jobserver: Client, + time_graph: Option, + modules_config: Arc, + metadata_config: Arc, + allocator_config: Arc +) -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); + let sess = tcx.sess; + + // Compute the set of symbols we need to retain when doing LTO (if we need to) + let exported_symbols = { + let mut exported_symbols = FxHashMap::default(); + + let copy_symbols = |cnum| { + let symbols = tcx.exported_symbols(cnum) + .iter() + .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) + .collect(); + Arc::new(symbols) + }; + + match sess.lto() { + Lto::No => None, + Lto::ThinLocal => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + Some(Arc::new(exported_symbols)) + } + Lto::Fat | Lto::Thin => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, copy_symbols(cnum)); + } + Some(Arc::new(exported_symbols)) + } + } + }; + + // First up, 
convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. + // After we've requested tokens then we'll, when we can, + // get tokens on `coordinator_receive` which will + // get managed in the main loop below. + let coordinator_send2 = coordinator_send.clone(); + let helper = jobserver.into_helper_thread(move |token| { + let msg : Message = Message::Token(token); + drop(coordinator_send2.send(Box::new(msg))); + }).expect("failed to spawn helper thread"); + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(sess, crate_info, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let assembler_cmd = if modules_config.no_integrated_as { + // HACK: currently we use linker (gcc) as our assembler + let (linker, flavor) = link::linker_and_flavor(sess); + + let (name, mut cmd) = get_linker(sess, &linker, flavor); + cmd.args(&sess.target.target.options.asm_args); + Some(Arc::new(AssemblerCommand { + name, + cmd, + })) + } else { + None + }; + + let cgcx : CodegenContext = CodegenContext { + backend : backend.clone(), + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + fewer_names: sess.fewer_names(), + save_temps: sess.opts.cg.save_temps, + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), + coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + 
allocator_module_config: allocator_config, + tm_factory: backend.target_machine_factory(tcx.sess, false), + total_cgus, + msvc_imps_needed: msvc_imps_needed(tcx), + target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), + debuginfo: tcx.sess.opts.debuginfo, + assembler_cmd, + }; + + // This is the "main loop" of parallel work happening for parallel codegen. + // It's here that we manage parallelism, schedule work, and work with + // messages coming from clients. + // + // There are a few environmental pre-conditions that shape how the system + // is set up: + // + // - Error reporting only can happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Codegen can only happen on the main thread. + // - Each thread doing substantial work most be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. + // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` the holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiver main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread. + // Since the main thread will often be busy doing codegen work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between to work packages. 
+ // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to codegen CGUs into LLVM work package + // (since the main thread is the only thread that can do this). The main + // thread will block until it receives a message from the coordinator, upon + // which it will codegen one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that a WorkItem. When a LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is, however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. However, since codegen usual is faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. 
+ // + // So the actual goal is to always produce just enough LLVM WorkItems as + // not to starve our LLVM worker threads. That means, once we have enough + // WorkItems in our queue, we can block the main thread, so it does not + // produce more until we need them. + // + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler processes implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is: We spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). + // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time. + // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade off between them. Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we do achieve this? 
Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with it's current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- + // It's worth also touching on the management of parallelism here. We don't + // want to just spawn a thread per work item because while that's optimal + // parallelism it may overload a system with too many threads or violate our + // configuration for the maximum amount of cpu to use for this process. To + // manage this we use the `jobserver` crate. + // + // Job servers are an artifact of GNU make and are used to manage + // parallelism between processes. A jobserver is a glorified IPC semaphore + // basically. Whenever we want to run some work we acquire the semaphore, + // and whenever we're done with that work we release the semaphore. In this + // manner we can ensure that the maximum number of parallel workers is + // capped at any one point in time. + // + // LTO and the coordinator thread + // ------------------------------ + // + // The final job the coordinator thread is responsible for is managing LTO + // and how that works. When LTO is requested what we'll to is collect all + // optimized LLVM modules into a local vector on the coordinator. Once all + // modules have been codegened and optimized we hand this to the `lto` + // module for further optimization. The `lto` module will return back a list + // of more modules to work on, which the coordinator will continue to spawn + // work for. 
+ // + // Each LLVM module is automatically sent back to the coordinator for LTO if + // necessary. There's already optimizations in place to avoid sending work + // back to the coordinator if LTO isn't requested. + return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + + // This is where we collect codegen units that have gone all the way + // through codegen and LLVM. + let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut lto_import_only_modules = Vec::new(); + let mut started_lto = false; + + // This flag tracks whether all items have gone through codegens + let mut codegen_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::<(WorkItem, u64)>::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing: + while !codegen_done || + work_items.len() > 0 || + running > 0 || + needs_lto.len() > 0 || + lto_import_only_modules.len() > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle { + + // While there are still CGUs to be codegened, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For codegenning more CGU or for running them through LLVM. 
+ if !codegen_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, codegen more items: + if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { + panic!("Could not send Message::CodegenItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Codegenning; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else { + // If we've finished everything related to normal codegen + // then it must be the case that we've got some LTO work to do. 
+ // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() + lto_import_only_modules.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + let import_only_modules = + mem::replace(&mut lto_import_only_modules, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + } + } + + // In this branch, we know that everything has been codegened, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } else { + // There is no unstarted work, so let the main thread + // take over for a running worker. Otherwise the + // implicit token would just go to waste. + // We reduce the `running` counter by one. The + // `tokens.truncate()` below will take care of + // giving the Token back. 
+ debug_assert!(running > 0); + running -= 1; + main_thread_worker_state = MainThreadWorkerState::LLVMing; + } + } + MainThreadWorkerState::Codegenning => { + bug!("codegen worker should not be codegenning after \ + codegen was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } + } + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; + } + + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::>().ok().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to codegen demand. + main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } + } + } + + Message::CodegenDone { llvm_work_item, cost } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. 
This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. + let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + Message::CodegenComplete => { + codegen_done = true; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. 
+ Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + free_worker_ids.push(worker_id); + needs_lto.push(result); + } + Message::AddImportOnlyModule { module_data, work_product } => { + assert!(!started_lto); + assert!(!codegen_done); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + lto_import_only_modules.push((module_data, work_product)); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + Message::Done { result: Err(()), worker_id: _ } => { + shared_emitter.fatal("aborting due to worker thread failure"); + // Exit the coordinator thread + return Err(()) + } + Message::CodegenItem => { + bug!("the coordinator should not receive codegen requests") + } + } + } + + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. 
+ set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. + compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + + Ok(CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of codegen + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(config: &ModuleConfig, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. + if config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +// Set up a destructor which will fire off a message that we're done as +// we exit. 
+struct Bomb { + coordinator_send: Sender>, + result: Option>, + worker_id: usize, +} +impl Drop for Bomb { + fn drop(&mut self) { + let worker_id = self.worker_id; + let msg : Message = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO { result: m, worker_id } + } + None => Message::Done { result: Err(()), worker_id } + }; + drop(self.coordinator_send.send(Box::new(msg))); + } +} + +fn spawn_work( + cgcx: CodegenContext, + work: WorkItem +) { + let depth = time_depth(); + + thread::spawn(move || { + set_time_depth(depth); + + let mut bomb : Bomb = Bomb { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. 
+ bomb.result = { + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() + }; + }); +} + +pub fn run_assembler( + cgcx: &CodegenContext, + handler: &Handler, + assembly: &Path, + object: &Path +) { + let assembler = cgcx.assembler_cmd + .as_ref() + .expect("cgcx.assembler_cmd is missing?"); + + let pname = &assembler.name; + let mut cmd = assembler.cmd.clone(); + cmd.arg("-c").arg("-o").arg(object).arg(assembly); + debug!("{:?}", cmd); + + match cmd.output() { + Ok(prog) => { + if !prog.status.success() { + let mut note = prog.stderr.clone(); + note.extend_from_slice(&prog.stdout); + + handler.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(str::from_utf8(¬e[..]).unwrap()) + .emit(); + handler.abort_if_errors(); + } + }, + Err(e) => { + handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); + handler.abort_if_errors(); + } + } +} + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + pub fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + pub fn fatal(&self, msg: &str) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + 
drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + code.clone(), + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCodegen { + pub backend : B, + pub crate_name: Symbol, + pub crate_hash: Svh, + pub metadata: EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: LinkerInfo, + pub crate_info: CrateInfo, + pub time_graph: Option, + pub coordinator_send: Sender>, + pub codegen_worker_receive: Receiver>, + pub shared_emitter_main: SharedEmitterMain, + pub future: thread::JoinHandle>, + pub output_filenames: Arc, +} + +impl OngoingCodegen { + pub fn join( + self, + sess: &Session + ) -> (CodegenResults, FxHashMap) { + self.shared_emitter_main.check(sess, true); + let 
compiled_modules = match self.future.join() { + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, + Err(_) => { + sess.fatal("Error during codegen/LLVM phase."); + } + }; + + sess.cgu_reuse_tracker.check_expected_reuse(sess); + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + let work_products = + copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, + &compiled_modules); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? + if sess.codegen_units() == 1 && sess.time_llvm_passes() { + self.backend.print_pass_timings() + } + + (CodegenResults { + crate_name: self.crate_name, + crate_hash: self.crate_hash, + metadata: self.metadata, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + crate_info: self.crate_info, + + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + metadata_module: compiled_modules.metadata_module, + }, work_products) + } + + pub fn submit_pre_codegened_module_to_llvm(&self, + tcx: TyCtxt, + module: ModuleCodegen) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + + // These are generally cheap and won't through off scheduling. 
+ let cost = 0; + submit_codegened_module_to_llvm(&self.backend, tcx, module, cost); + } + + pub fn codegen_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + let msg : Message = Message::CodegenComplete; + drop(self.coordinator_send.send(Box::new(msg))); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_codegen_item(&self) { + match self.codegen_worker_receive.recv() { + Ok(Message::CodegenItem) => { + // Nothing to do + } + Ok(_) => panic!("unexpected message"), + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. + } + } + } +} + +pub fn submit_codegened_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: ModuleCodegen, + cost: u64 +) { + let llvm_work_item = WorkItem::Optimize(module); + let msg : Message = Message::CodegenDone { + llvm_work_item, + cost, + }; + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn submit_post_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); + let msg : Message = Message::CodegenDone { + llvm_work_item, + cost: 0, + }; + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn submit_pre_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let filename = pre_lto_bitcode_filename(&module.name); + let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); + let file = fs::File::open(&bc_path).unwrap_or_else(|e| { + panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) + }); + + let mmap = unsafe { + memmap::Mmap::map(&file).unwrap_or_else(|e| { + panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) + }) + }; + let msg : Message = Message::AddImportOnlyModule { + module_data: SerializedModule::FromUncompressedFile(mmap), + 
work_product: module.source, + }; + // Schedule the module to be loaded + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn pre_lto_bitcode_filename(module_name: &str) -> String { + format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) +} + +fn msvc_imps_needed(tcx: TyCtxt) -> bool { + // This should never be true (because it's not supported). If it is true, + // something is wrong with commandline arg validation. + assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.opts.cg.prefer_dynamic)); + + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() +} diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index cbe41065f75c8..2ffdf94f6c662 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -39,6 +39,8 @@ use rustc::util::profiling::ProfileCategory; use rustc::session::config::{self, EntryFnType, Lto}; use rustc::session::Session; use mir::place::PlaceRef; +use back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, + submit_post_lto_module_to_llvm}; use {MemFlags, CrateInfo}; use callee; use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; @@ -51,7 +53,7 @@ use rustc_mir::monomorphize::Instance; use rustc_mir::monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; use mono_item::{MonoItem, BaseMonoItemExt}; -use rustc::util::nodemap::{FxHashMap, FxHashSet, DefIdSet}; +use rustc::util::nodemap::{FxHashMap, DefIdSet}; use rustc_data_structures::sync::Lrc; use rustc_codegen_utils::{symbol_names_test, 
check_for_rustc_errors_attr}; use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; @@ -593,7 +595,7 @@ pub fn codegen_crate( backend: B, tcx: TyCtxt<'ll, 'tcx, 'tcx>, rx: mpsc::Receiver> -) -> B::OngoingCodegen { +) -> OngoingCodegen { check_for_rustc_errors_attr(tcx); @@ -640,19 +642,20 @@ pub fn codegen_crate( // Skip crate items and just output metadata in -Z no-codegen mode. if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = backend.start_async_codegen( + let ongoing_codegen = start_async_codegen( + backend, tcx, time_graph.clone(), metadata, rx, 1); - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); - backend.codegen_finished(&ongoing_codegen, tcx); + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + ongoing_codegen.codegen_finished(tcx); assert_and_save_dep_graph(tcx); - backend.check_for_errors(&ongoing_codegen, tcx.sess); + ongoing_codegen.check_for_errors(tcx.sess); return ongoing_codegen; } @@ -674,7 +677,8 @@ pub fn codegen_crate( } } - let ongoing_codegen = backend.start_async_codegen( + let ongoing_codegen = start_async_codegen( + backend.clone(), tcx, time_graph.clone(), metadata, @@ -722,10 +726,10 @@ pub fn codegen_crate( }; if let Some(allocator_module) = allocator_module { - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module); + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); } - backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module); + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); // We sort the codegen units by size. This way we can schedule work for LLVM // a bit more efficiently. 
@@ -739,8 +743,8 @@ pub fn codegen_crate( let mut all_stats = Stats::default(); for cgu in codegen_units.into_iter() { - backend.wait_for_signal_to_codegen_item(&ongoing_codegen); - backend.check_for_errors(&ongoing_codegen, tcx.sess); + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); let cgu_reuse = determine_cgu_reuse(tcx, &cgu); tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); @@ -759,14 +763,14 @@ pub fn codegen_crate( false } CguReuse::PreLto => { - backend.submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { + submit_pre_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { name: cgu.name().to_string(), source: cgu.work_product(tcx), }); true } CguReuse::PostLto => { - backend.submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { + submit_post_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { name: cgu.name().to_string(), source: cgu.work_product(tcx), }); @@ -775,7 +779,7 @@ pub fn codegen_crate( }; } - backend.codegen_finished(&ongoing_codegen, tcx); + ongoing_codegen.codegen_finished(tcx); // Since the main thread is sometimes blocked during codegen, we keep track // -Ztime-passes output manually. 
@@ -809,7 +813,7 @@ pub fn codegen_crate( } } - backend.check_for_errors(&ongoing_codegen, tcx.sess); + ongoing_codegen.check_for_errors(tcx.sess); assert_and_save_dep_graph(tcx); ongoing_codegen @@ -892,7 +896,7 @@ fn collect_and_partition_mono_items<'ll, 'tcx>( }).collect(); if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { - let mut item_to_cgus: FxHashMap<_, Vec<_>> = FxHashMap(); + let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); for cgu in &codegen_units { for (&mono_item, &linkage) in cgu.items() { @@ -955,17 +959,17 @@ impl CrateInfo { compiler_builtins: None, profiler_runtime: None, sanitizer_runtime: None, - is_no_builtins: FxHashSet(), - native_libraries: FxHashMap(), + is_no_builtins: Default::default(), + native_libraries: Default::default(), used_libraries: tcx.native_libraries(LOCAL_CRATE), link_args: tcx.link_args(LOCAL_CRATE), - crate_name: FxHashMap(), + crate_name: Default::default(), used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), - used_crate_source: FxHashMap(), - wasm_imports: FxHashMap(), - lang_item_to_crate: FxHashMap(), - missing_lang_items: FxHashMap(), + used_crate_source: Default::default(), + wasm_imports: Default::default(), + lang_item_to_crate: Default::default(), + missing_lang_items: Default::default(), }; let lang_items = tcx.lang_items(); diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs index 5718d3e50236c..fd343a45561ce 100644 --- a/src/librustc_codegen_ssa/diagnostics.rs +++ b/src/librustc_codegen_ssa/diagnostics.rs @@ -34,4 +34,15 @@ fn main() { ``` "##, +E0669: r##" +Cannot convert inline assembly operand to a single LLVM value. + +This error usually happens when trying to pass in a value to an input inline +assembly operand that is actually a pair of values. 
In particular, this can +happen when trying to pass in a slice, for instance a `&str`. In Rust, these +values are represented internally as a pair of values, the pointer and its +length. When passed as an input operand, this pair of values can not be +coerced into a register and thus we must fail with an error. +"## + } diff --git a/src/librustc_codegen_ssa/interfaces/backend.rs b/src/librustc_codegen_ssa/interfaces/backend.rs index fbdccfca39da8..f11666d237844 100644 --- a/src/librustc_codegen_ssa/interfaces/backend.rs +++ b/src/librustc_codegen_ssa/interfaces/backend.rs @@ -9,7 +9,7 @@ // except according to those terms. use super::CodegenObject; -use {ModuleCodegen, CachedModuleCodegen}; +use super::write::WriteBackendMethods; use rustc::session::Session; use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc::middle::cstore::EncodedMetadata; @@ -17,9 +17,7 @@ use rustc::middle::allocator::AllocatorKind; use rustc::ty::TyCtxt; use rustc::mir::mono::Stats; use syntax_pos::symbol::InternedString; -use rustc::util::time_graph::TimeGraph; -use std::sync::mpsc::Receiver; -use std::any::Any; +use std::sync::Arc; pub trait Backend<'ll> { type Value : 'll + CodegenObject; @@ -28,42 +26,29 @@ pub trait Backend<'ll> { type Context; } -pub trait ExtraBackendMethods : CodegenBackend { - type Metadata; - type OngoingCodegen; - +pub trait ExtraBackendMethods : CodegenBackend + WriteBackendMethods + Sized + Send + Sync { fn thin_lto_available(&self) -> bool; fn pgo_available(&self) -> bool; - fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Metadata; + fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module; fn write_metadata<'b, 'gcx>( &self, tcx: TyCtxt<'b, 'gcx, 'gcx>, - metadata: &Self::Metadata + metadata: &Self::Module ) -> EncodedMetadata; - fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Metadata, kind: AllocatorKind); - - fn start_async_codegen( - &self, - tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, 
- coordinator_receive: Receiver>, - total_cgus: usize - ) -> Self::OngoingCodegen; - fn submit_pre_codegened_module_to_llvm( - &self, - codegen: &Self::OngoingCodegen, - tcx: TyCtxt, - module: ModuleCodegen - ); - fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen); - fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen); - fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt); - fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session); - fn wait_for_signal_to_codegen_item(&self, codegen: &Self::OngoingCodegen); + fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Module, kind: AllocatorKind); fn compile_codegen_unit<'ll, 'tcx: 'll>( &self, tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString ) -> Stats ; + // If find_features is true this won't access `sess.crate_types` by assuming + // that `is_pie_binary` is false. When we discover LLVM target features + // `sess.crate_types` is uninitialized so we cannot access it. 
+ fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result + Send + Sync>; + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str; } diff --git a/src/librustc_codegen_ssa/interfaces/misc.rs b/src/librustc_codegen_ssa/interfaces/misc.rs index f22933d2641ee..73ae29888a72f 100644 --- a/src/librustc_codegen_ssa/interfaces/misc.rs +++ b/src/librustc_codegen_ssa/interfaces/misc.rs @@ -19,8 +19,9 @@ use std::sync::Arc; use rustc_mir::monomorphize::partitioning::CodegenUnit; pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { - fn vtables(&self) -> &RefCell, - Option>), Self::Value>>; + fn vtables(&self) -> &RefCell< + FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value> + >; fn check_overflow(&self) -> bool; fn instances(&self) -> &RefCell, Self::Value>>; fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs index cb1f292a1e1bb..62088d998bf27 100644 --- a/src/librustc_codegen_ssa/interfaces/mod.rs +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -36,6 +36,7 @@ mod intrinsic; mod debuginfo; mod abi; mod asm; +mod write; use rustc::ty::Ty; use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; @@ -52,6 +53,7 @@ pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; pub use self::abi::{AbiMethods, AbiBuilderMethods}; pub use self::asm::{AsmMethods, AsmBuilderMethods}; +pub use self::write::{WriteBackendMethods, ThinBufferMethods, ModuleBufferMethods}; pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} diff --git a/src/librustc_codegen_ssa/interfaces/write.rs b/src/librustc_codegen_ssa/interfaces/write.rs new file mode 100644 index 0000000000000..a0c08670a20df --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/write.rs @@ -0,0 +1,78 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use back::write::{ModuleConfig, CodegenContext, DiagnosticHandlers}; +use back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use {ModuleCodegen, CompiledModule}; + +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use rustc_errors::{FatalError, Handler}; + +pub trait WriteBackendMethods : 'static + Sized + Clone { + type Module : Send + Sync; + type TargetMachine : Clone; + type ModuleBuffer : ModuleBufferMethods; + type Context : ?Sized; + type ThinData : Send + Sync; + type ThinBuffer : ThinBufferMethods; + + /// Performs LTO, which in the case of full LTO means merging all modules into + /// a single one and returning it for further optimizing. For ThinLTO, it will + /// do the global analysis necessary and return two lists, one of the modules + /// the need optimization and another for modules that can simply be copied over + /// from the incr. comp. cache. 
+ fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec>, Vec), FatalError>; + fn new_diagnostic_handlers<'a>( + cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a Self::Context + ) -> DiagnosticHandlers<'a, Self>; + fn drop_diagnostic_handlers<'a>(diag: &mut DiagnosticHandlers<'a, Self>); + fn print_pass_timings(&self); + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError>; + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline + ) -> Result, FatalError>; + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result; + fn run_lto_pass_manager( + cgcx: &CodegenContext, + llmod: &ModuleCodegen, + config: &ModuleConfig, + thin: bool + ); +} + +pub trait ThinBufferMethods : Send + Sync { + fn data(&self) -> &[u8]; +} + +pub trait ModuleBufferMethods : Send + Sync { + fn data(&self) -> &[u8]; +} diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index 2a42ad91e3d58..7e8f76fd97630 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -39,7 +39,16 @@ extern crate syntax_pos; extern crate rustc_incremental; extern crate rustc_codegen_utils; extern crate rustc_data_structures; +extern crate rustc_allocator; +extern crate rustc_fs_util; +extern crate serialize; +extern crate rustc_errors; +extern crate rustc_demangle; +extern crate cc; extern crate libc; +extern crate jobserver; +extern crate memmap; +extern crate num_cpus; use std::path::PathBuf; use rustc::dep_graph::WorkProduct; @@ -48,7 +57,9 @@ use rustc::middle::lang_items::LangItem; use rustc::hir::def_id::CrateNum; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use 
rustc_data_structures::sync::Lrc; +use rustc_data_structures::svh::Svh; use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary}; +use syntax_pos::symbol::Symbol; // NB: This module needs to be declared first so diagnostics are // registered before they are used. @@ -63,6 +74,7 @@ pub mod callee; pub mod glue; pub mod meth; pub mod mono_item; +pub mod back; pub struct ModuleCodegen { /// The name of the module. When the crate may be saved between @@ -159,4 +171,17 @@ pub struct CrateInfo { pub missing_lang_items: FxHashMap>, } + +pub struct CodegenResults { + pub crate_name: Symbol, + pub modules: Vec, + pub allocator_module: Option, + pub metadata_module: CompiledModule, + pub crate_hash: Svh, + pub metadata: rustc::middle::cstore::EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo, + pub crate_info: CrateInfo, +} + __build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index ae5288578e443..dc69baa606d66 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -100,20 +100,20 @@ pub fn get_vtable<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx> // Not in the cache. Build it. 
let nullptr = cx.const_null(cx.type_i8p()); - let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); + let methods = tcx.vtable_methods(trait_ref.with_self_ty(*tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { opt_mth.map_or(nullptr, |(def_id, substs)| { callee::resolve_and_get_fn(cx, def_id, substs) }) }); - let (size, align) = cx.size_and_align_of(ty); + let (size, align) = cx.layout_of(ty).size_and_align(); // ///////////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_mir/interpret/traits.rs // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ - callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), + cx.get_fn(monomorphize::resolve_drop_in_place(*cx.tcx(), ty)), cx.const_usize(size.bytes()), cx.const_usize(align.abi()) ].iter().cloned().chain(methods).collect(); diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs index f017eedc5248d..e3f598f842fc1 100644 --- a/src/librustc_codegen_ssa/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -16,7 +16,6 @@ use super::OperandValue; use rustc::ty::Ty; use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; use interfaces::*; -use value::Value; impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> FunctionCx<'a, 'f, 'll, 'tcx, Cx> where @@ -90,7 +89,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> let input_vals = inputs.iter() .try_fold(Vec::with_capacity(inputs.len()), |mut acc, input| { - let op = self.codegen_operand(&mutbx, input); + let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); Ok(acc) @@ -104,7 +103,7 @@ impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> 
"invalid value for constraint in inline assembly"); } else { let input_vals = input_vals.unwrap(); - let res = xb.codegen_inline_asm(asm, outputs, input_vals); + let res = bx.codegen_inline_asm(asm, outputs, input_vals); if !res { span_err!(bx.cx().sess(), statement.source_info.span, E0668, "malformed inline assembly"); From 1cdea08ebe9f7dc93262e476e01be71185642157 Mon Sep 17 00:00:00 2001 From: Denis Merigoux Date: Mon, 22 Oct 2018 18:35:28 +0200 Subject: [PATCH 76/76] Added README explaining the refactoring --- src/librustc_codegen_llvm/back/lto.rs | 12 +-- src/librustc_codegen_ssa/README.md | 121 ++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 6 deletions(-) create mode 100644 src/librustc_codegen_ssa/README.md diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index d1756127ff5aa..80b37047642be 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -709,25 +709,25 @@ pub unsafe fn optimize_thin_module( let msg = "failed to prepare thin LTO module".to_string(); return Err(write::llvm_err(&diag_handler, msg)) } - save_temp_bitcode(&cgcx, &module, "thin-lto-after-rename"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); timeline.record("rename"); if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module".to_string(); return Err(write::llvm_err(&diag_handler, msg)) } - save_temp_bitcode(&cgcx, &module, "thin-lto-after-resolve"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); timeline.record("resolve"); if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module".to_string(); return Err(write::llvm_err(&diag_handler, msg)) } - save_temp_bitcode(&cgcx, &module, "thin-lto-after-internalize"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); timeline.record("internalize"); if 
!llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module".to_string(); return Err(write::llvm_err(&diag_handler, msg)) } - save_temp_bitcode(&cgcx, &module, "thin-lto-after-import"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); timeline.record("import"); // Ok now this is a bit unfortunate. This is also something you won't @@ -760,7 +760,7 @@ pub unsafe fn optimize_thin_module( // so it appears). Hopefully we can remove this once upstream bugs are // fixed in LLVM. llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); - save_temp_bitcode(&cgcx, &module, "thin-lto-after-patch"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); timeline.record("patch"); // Alright now that we've done everything related to the ThinLTO @@ -771,7 +771,7 @@ pub unsafe fn optimize_thin_module( info!("running thin lto passes over {}", module.name); let config = cgcx.config(module.kind); run_pass_manager(cgcx, &module, config, true); - save_temp_bitcode(&cgcx, &module, "thin-lto-after-pm"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); timeline.record("thin-done"); } Ok(module) diff --git a/src/librustc_codegen_ssa/README.md b/src/librustc_codegen_ssa/README.md new file mode 100644 index 0000000000000..32e9c11f34814 --- /dev/null +++ b/src/librustc_codegen_ssa/README.md @@ -0,0 +1,121 @@ +# Refactoring of `rustc_codegen_llvm` +by Denis Merigoux, October 23rd 2018 + +## State of the code before the refactoring + +All the code related to the compilation of MIR into LLVM IR was contained inside the `rustc_codegen_llvm` crate. 
Here is the breakdown of the most important elements: +* the `back` folder (7,800 LOC) implements the mechanisms for creating the different object files and archive through LLVM, but also the communication mechanisms for parallel code generation; +* the `debuginfo` (3,200 LOC) folder contains all code that passes debug information down to LLVM; +* the `llvm` (2,200 LOC) folder defines the FFI necessary to communicate with LLVM using the C++ API; +* the `mir` (4,300 LOC) folder implements the actual lowering from MIR to LLVM IR; +* the `base.rs` (1,300 LOC) file contains some helper functions but also the high-level code that launches the code generation and distributes the work. +* the `builder.rs` (1,200 LOC) file contains all the functions generating individual LLVM IR instructions inside a basic block; +* the `common.rs` (450 LOC) contains various helper functions and all the functions generating LLVM static values; +* the `type_.rs` (300 LOC) defines most of the type translations to LLVM IR. + +The goal of this refactoring is to separate inside this crate code that is specific to the LLVM from code that can be reused for other rustc backends. For instance, the `mir` folder is almost entirely backend-specific but it relies heavily on other parts of the crate. The separation of the code must not affect the logic of the code nor its performance. + +For these reasons, the separation process involves two transformations that have to be done at the same time for the resulting code to compile : + +1. replace all the LLVM-specific types by generics inside function signatures and structure definitions; +2. encapsulate all functions calling the LLVM FFI inside a set of traits that will define the interface between backend-agnostic code and the backend. + +While the LLVM-specific code will be left in `rustc_codegen_llvm`, all the new interfaces and backend-agnostic code will be moved in `rustc_codegen_ssa` (name suggestion by @eddyb). 
+ +## Generic types and structures + +@irinagpopa started to parametrize the types of `rustc_codegen_llvm` by a generic `Value` type, implemented in LLVM by a reference `&'ll Value`. This work has been extended to all structures inside the `mir` folder and elsewhere, as well as for LLVM's `BasicBlock` and `Type` types. + +The two most important structures for the LLVM codegen are `CodegenCx` and `Builder`. They are parametrized by multiple lifetime parameters and the type for `Value`. + +```rust +struct CodegenCx<'ll, 'tcx: 'll, V : 'll> { + /* ... */ +} + +struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll> { + cx: &'a CodegenCx<'ll, 'tcx, V>, + /* ... */ +} +``` + +`CodegenCx` is used to compile one codegen-unit that can contain multiple functions, whereas `Builder` is created to compile one basic block. + +The code in `rustc_codegen_llvm` has to deal with multiple explicit lifetime parameters, that correspond to the following: +* `'tcx` is the longest lifetime, that corresponds to the original `TyCtxt` containing the program's information; +* `'a` is a short-lived reference of a `CodegenCx` or another object inside a struct; +* `'ll` is the lifetime of references to LLVM objects such as `Value` or `Type`. + +Although there are already many lifetime parameters in the code, making it generic uncovered situations where the borrow-checker was passing only due to the special nature of the LLVM objects manipulated (they are extern pointers). For instance, an additional lifetime parameter had to be added to `LocalAnalyzer` in `analyse.rs`, leading to the definition: + +```rust +struct LocalAnalyzer<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll> { + /* ... */ +} +``` + +However, the two most important structures `CodegenCx` and `Builder` are not defined in the backend-agnostic code.
Indeed, their content is highly specific to the backend and it makes more sense to leave their definition to the backend implementor than to allow just a narrow spot via a generic field for the backend's context. + +## Traits and interface + +Because they have to be defined by the backend, `CodegenCx` and `Builder` will be the structures implementing all the traits defining the backend's interface. These traits are defined in the folder `rustc_codegen_ssa/interfaces` and all the backend-agnostic code is parametrized by them. For instance, let us explain how a function in `base.rs` is parametrized: + +```rust +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx> +) where &'a Bx::CodegenCx : + LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { + /* ... */ +} +``` + +In this signature, we have the three lifetime parameters explained earlier and the master type `Bx` which satisfies the trait `BuilderMethods` corresponding to the interface satisfied by the `Builder` struct. The `BuilderMethods` trait defines an associated type `Bx::CodegenCx` that itself satisfies the `CodegenMethods` trait implemented by the struct `CodegenCx`. This prototype contains a `where` clause because the `LayoutOf` trait is satisfied by a reference (`&'a Bx::CodegenCx`) of the associated type, and we can't specify that in the trait definition of `BuilderMethods`. Finally, we have to specify that the associated types inside `LayoutOf` are the actual types of Rust, using the `Ty = Ty<'tcx>` syntax.
+ +On the trait side, here is an example with part of the definition of `BuilderMethods` in `interfaces/builder.rs`: + +```rust +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + + DebugInfoBuilderMethods<'a, 'll, 'tcx> + ArgTypeMethods<'a, 'll, 'tcx> + + AbiBuilderMethods<'a, 'll, 'tcx> + IntrinsicCallMethods<'a, 'll, 'tcx> + + AsmBuilderMethods<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn new_block<'b>( + cx: &'a Self::CodegenCx, + llfn: >::Value, + name: &'b str + ) -> Self; + /* ... */ + fn cond_br( + &mut self, + cond: >::Value, + then_llbb: >::BasicBlock, + else_llbb: >::BasicBlock, + ); + /* ... */ +} +``` + +Finally, a master structure implementing the `ExtraBackendMethods` trait is used for high-level codegen-driving functions like `codegen_crate` in `base.rs`. For LLVM, it is the empty `LlvmCodegenBackend`. `ExtraBackendMethods` should be implemented by the same structure that implements the `CodegenBackend` defined in `rustc_codegen_utils/codegen_backend.rs`. + +During the traitification process, certain functions have been converted from methods of a local structure to methods of `CodegenCx` or `Builder` and a corresponding `self` parameter has been added. Indeed, LLVM stores information internally that it can access when called through its API. This information does not show up in a Rust data structure carried around when these methods are called. However, when implementing a Rust backend for `rustc`, these methods will need information from `CodegenCx`, hence the additional parameter (unused in the LLVM implementation of the trait). + +## State of the code after the refactoring + +The traits offer an API which is very similar to the API of LLVM. This is not the best solution since LLVM has a very special way of doing things: when adding another backend, the trait definitions might be changed in order to offer more flexibility.
+ +However, the current separation between backend-agnostic and LLVM-specific code has allowed the reuse of a significant part of the old `rustc_codegen_llvm`. Here is the new LOC breakdown between backend-agnostic (BA) and LLVM for the most important elements: + +* `back` folder: 3,800 (BA) vs 4,100 (LLVM); +* `mir` folder: 4,400 (BA) vs 0 (LLVM); +* `base.rs`: 1,100 (BA) vs 250 (LLVM); +* `builder.rs`: 1,400 (BA) vs 0 (LLVM); +* `common.rs`: 350 (BA) vs 350 (LLVM); + +The `debuginfo` folder has been left almost untouched by the splitting and is specific to LLVM. Only its high-level features have been traitified. + +The new `interfaces` folder has 1,500 LOC only for trait definitions. Overall, the 27,000 LOC-sized old `rustc_codegen_llvm` code has been split into the new 18,500 LOC-sized `rustc_codegen_llvm` and the 12,000 LOC-sized `rustc_codegen_ssa`. We can say that this refactoring allowed the reuse of approximately 10,000 LOC that would otherwise have had to be duplicated between the multiple backends of `rustc`. + +The refactored version of `rustc`'s backend introduced no regressions in the test suite nor in the performance benchmarks, which is consistent with the nature of the refactoring that used only compile-time parametricity (no trait objects).