diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 479328a557cfa..2d28011407e7b 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -675,15 +675,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             return OperandRef::new_zst(self, place.layout);
         }

-        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
-            let vr = scalar.valid_range(bx);
-            match scalar.primitive() {
-                abi::Int(..) => {
-                    if !scalar.is_always_valid(bx) {
+        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar, vr: Option<WrappingRange>) {
+            match (vr, scalar.primitive()) {
+                (Some(vr), abi::Int(..)) => {
+                    if !vr.is_full_for(scalar.size(bx)) {
                         bx.range_metadata(load, vr);
                     }
                 }
-                abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
+                (Some(vr), abi::Pointer) if vr.start < vr.end && !vr.contains(0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}
@@ -697,7 +696,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         else if place.layout.is_gcc_immediate() {
             let load = self.load(place.llval.get_type(), place.llval, place.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
-                scalar_load_metadata(self, load, scalar);
+                let vr = place.scalar_valid_range.map(|ranges| ranges.single()).flatten();
+                scalar_load_metadata(self, load, scalar, vr);
             }
             OperandValue::Immediate(self.to_immediate(load, place.layout))
         }
@@ -705,16 +705,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             let b_offset = a.size(self).align_to(b.align(self).abi);
             let pair_type = place.layout.gcc_type(self, false);

-            let mut load = |i, scalar: &abi::Scalar, align| {
+            let mut load = |i, scalar: &abi::Scalar, align, vr| {
                 let llptr = self.struct_gep(pair_type, place.llval, i as u64);
                 let load = self.load(llptr.get_type(), llptr, align);
-                scalar_load_metadata(self, load, scalar);
+                scalar_load_metadata(self, load, scalar, vr);
                 if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
             };

+            let (vr_a, vr_b) = place.scalar_valid_range.map(|ranges| ranges.pair()).flatten().unzip();
             OperandValue::Pair(
-                load(0, a, place.align),
-                load(1, b, place.align.restrict_for_offset(b_offset)),
+                load(0, a, place.align, vr_a),
+                load(1, b, place.align.restrict_for_offset(b_offset), vr_b),
             )
         }
         else {
@@ -747,7 +748,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.switch_to_block(body_bb);

         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+        let dest = PlaceRef::new_sized_aligned(self.cx(), current_val, cg_elem.layout, align);
+        cg_elem.val.store(&mut self, dest);

         let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
         self.llbb().add_assignment(None, current, next);
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index b056b6d473042..1d791f3975d16 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -239,7 +239,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
             self.const_bitcast(value, ty)
         };
-        PlaceRef::new_sized(value, layout)
+        PlaceRef::new_sized(self, value, layout)
     }

     fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 68a05d95ef734..1b005cc17dbca 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -91,7 +91,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         let name_str = name.as_str();

         let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+        let result = PlaceRef::new_sized(self, llresult, fn_abi.ret.layout);

         let simple = get_simple_intrinsic(self, name);
         let llval =
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 58996a9db78ad..fdb74f346bf81 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -6,7 +6,7 @@
  * TODO(antoyo): remove the patches.
  */

-#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
+#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len, unzip_option)]
 #![allow(broken_intra_doc_links)]
 #![recursion_limit="256"]
 #![warn(rust_2018_idioms)]
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index c41a41980eb0b..d7e222f50b010 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -478,20 +478,21 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             load: &'ll Value,
             scalar: abi::Scalar,
             layout: TyAndLayout<'tcx>,
+            vr: Option<WrappingRange>,
             offset: Size,
         ) {
             if !scalar.is_always_valid(bx) {
                 bx.noundef_metadata(load);
             }

-            match scalar.primitive() {
-                abi::Int(..) => {
-                    if !scalar.is_always_valid(bx) {
-                        bx.range_metadata(load, scalar.valid_range(bx));
+            match (vr, scalar.primitive()) {
+                (Some(vr), abi::Int(..)) => {
+                    if !vr.is_full_for(scalar.size(bx)) {
+                        bx.range_metadata(load, vr);
                     }
                 }
-                abi::Pointer => {
-                    if !scalar.valid_range(bx).contains(0) {
+                (Some(vr), abi::Pointer) => {
+                    if !vr.contains(0) {
                         bx.nonnull_metadata(load);
                     }

@@ -501,7 +502,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                         }
                     }
                 }
-                abi::F32 | abi::F64 => {}
+                _ => {}
             }
         }

@@ -524,7 +525,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.llval, place.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
-                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
+                    let vr = place.scalar_valid_range.map(|range| range.single()).flatten();
+                    scalar_load_metadata(self, load, scalar, place.layout, vr, Size::ZERO);
                 }
                 load
             });
@@ -533,17 +535,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let b_offset = a.size(self).align_to(b.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);

-            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
+            let mut load = |i, scalar: abi::Scalar, layout, align, vr, offset| {
                 let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
-                scalar_load_metadata(self, load, scalar, layout, offset);
+                scalar_load_metadata(self, load, scalar, layout, vr, offset);
                 self.to_immediate_scalar(load, scalar)
             };

+            let (vr_a, vr_b) = place.scalar_valid_range.map(|vrs| vrs.pair()).flatten().unzip();
             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.align, vr_a, Size::ZERO),
+                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), vr_b, b_offset),
             )
         } else {
             OperandValue::Ref(place.llval, None, place.align)
@@ -577,9 +580,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let mut body_bx = Self::build(self.cx, body_bb);

         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem
-            .val
-            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
+        cg_elem.val.store(
+            &mut body_bx,
+            PlaceRef::new_sized_aligned(self.cx, current, cg_elem.layout, align),
+        );

         let next = body_bx.inbounds_gep(
             self.backend_type(cg_elem.layout),
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index b69d7a000ee9a..a7bb6bb82dbd2 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -308,7 +308,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             };
             self.const_bitcast(llval, llty)
         };
-        PlaceRef::new_sized(llval, layout)
+        PlaceRef::new_sized(self, llval, layout)
     }

     fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9927f5f399bcd..82641301cc0e3 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -99,7 +99,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         let name = tcx.item_name(def_id);

         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+        let result = PlaceRef::new_sized(self, llresult, fn_abi.ret.layout);

         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 913cf4eea13a3..afef1b4c109dc 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -11,6 +11,7 @@
 #![feature(once_cell)]
 #![feature(nll)]
 #![feature(iter_intersperse)]
+#![feature(unzip_option)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]

diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 03ef6d50d44cd..b67f4dd0cda33 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1240,7 +1240,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

         // Handle both by-ref and immediate tuples.
         if let Ref(llval, None, align) = tuple.val {
-            let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+            let tuple_ptr = PlaceRef::new_sized_aligned(bx, llval, tuple.layout, align);
             for i in 0..tuple.layout.fields.count() {
                 let field_ptr = tuple_ptr.project_field(bx, i);
                 let field = bx.load_operand(field_ptr);
@@ -1581,7 +1581,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let llty = bx.backend_type(src.layout);
             let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
             let align = src.layout.align.abi.min(dst.align);
-            src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+            src.val.store(bx, PlaceRef::new_sized_aligned(bx, cast_ptr, src.layout, align));
         }

         // Stores the return value of a function call into it's final location.
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 0ed4c3f1d9430..9fbfcf7a8bfb8 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -70,7 +70,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let name_str = name.as_str();

         let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+        let result = PlaceRef::new_sized(bx, llresult, fn_abi.ret.layout);

         let llval = match name {
             sym::assume => {
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 0c958de64fa1c..17a22dbf9fc62 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -217,7 +217,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                 debug!("alloc: {:?} (return place) -> place", local);
                 let llretptr = bx.get_param(0);
-                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+                return LocalRef::Place(PlaceRef::new_sized(&bx, llretptr, layout));
             }

             if memory_locals.contains(local) {
@@ -356,7 +356,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 // FIXME: lifetimes
                 let llarg = bx.get_param(llarg_idx);
                 llarg_idx += 1;
-                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+                LocalRef::Place(PlaceRef::new_sized(bx, llarg, arg.layout))
             } else if arg.is_unsized_indirect() {
                 // As the storage for the indirect argument lives during
                 // the whole function call, we just copy the fat pointer.
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 08be4c0a7b622..c87090750e710 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -137,7 +137,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
         };
         let layout = cx.layout_of(projected_ty);
-        PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+        PlaceRef {
+            llval: llptr,
+            llextra,
+            layout,
+            align: layout.align.abi,
+            scalar_valid_range: layout.abi.scalar_valid_range(cx),
+        }
     }

     /// If this operand is a `Pair`, we return an aggregate with the two values.
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 3185b952ab886..fda3d97a1c639 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -10,7 +10,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, ScalarRanges, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

 #[derive(Copy, Clone, Debug)]
@@ -26,21 +26,30 @@ pub struct PlaceRef<'tcx, V> {

     /// The alignment we know for this place.
     pub align: Align,
+
+    /// If the place refers to a `Scalar` or `ScalarPair` value, this represents
+    /// the range(s) of valid values.
+    pub scalar_valid_range: Option<ScalarRanges>,
 }

 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
-    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
-        assert!(!layout.is_unsized());
-        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+    pub fn new_sized(
+        cx: &impl HasTyCtxt<'tcx>,
+        llval: V,
+        layout: TyAndLayout<'tcx>,
+    ) -> PlaceRef<'tcx, V> {
+        Self::new_sized_aligned(cx, llval, layout, layout.align.abi)
     }

     pub fn new_sized_aligned(
+        cx: &impl HasTyCtxt<'tcx>,
         llval: V,
         layout: TyAndLayout<'tcx>,
         align: Align,
     ) -> PlaceRef<'tcx, V> {
         assert!(!layout.is_unsized());
-        PlaceRef { llval, llextra: None, layout, align }
+        let scalar_valid_range = layout.abi.scalar_valid_range(cx);
+        PlaceRef { llval, llextra: None, layout, align, scalar_valid_range }
     }

     // FIXME(eddyb) pass something else for the name so no work is done
@@ -51,7 +60,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
         let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
-        Self::new_sized(tmp, layout)
+        Self::new_sized(bx, tmp, layout)
     }

     /// Returns a place for an indirect reference to an unsized place.
@@ -125,12 +134,24 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
             }
         };
+
+        // If this field is the primitive type of a newtype, propagate the scalar valid range
+        let scalar_valid_range =
+            if let (0, Variants::Single { .. }, Abi::Scalar(_) | Abi::ScalarPair(..)) =
+                (offset.bytes(), &self.layout.variants, field.abi)
+            {
+                self.scalar_valid_range
+            } else {
+                field.abi.scalar_valid_range(bx)
+            };
+
         PlaceRef {
             // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
             llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
             llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
             layout: field,
             align: effective_field_align,
+            scalar_valid_range,
         }
     };

@@ -200,6 +221,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             llextra: self.llextra,
             layout: field,
             align: effective_field_align,
+            scalar_valid_range: field.abi.scalar_valid_range(bx),
         }
     }

@@ -392,6 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             llextra: None,
             layout,
             align: self.align.restrict_for_offset(offset),
+            scalar_valid_range: layout.abi.scalar_valid_range(bx),
         }
     }

@@ -402,6 +425,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let mut downcast = *self;
         downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+        downcast.scalar_valid_range = downcast.layout.abi.scalar_valid_range(bx);

         // Cast to the appropriate variant struct type.
         let variant_ty = bx.cx().backend_type(downcast.layout);
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index fd29c9e281b92..64189e2fb22b2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     scratch.storage_dead(&mut bx);
                 }
                 OperandValue::Ref(llref, None, align) => {
-                    let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+                    let source = PlaceRef::new_sized_aligned(&bx, llref, operand.layout, align);
                     base::coerce_unsized_into(&mut bx, source, dest);
                 }
                 OperandValue::Ref(_, Some(_), _) => {
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index a771369c80789..a6745827a44d9 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -1038,6 +1038,34 @@ impl AddressSpace {
     pub const DATA: Self = AddressSpace(0);
 }

+/// Inclusive wrap-around range of valid values for a single Scalar or ScalarPair.
+#[derive(Copy, Clone, Debug)]
+pub enum ScalarRanges {
+    /// Valid range for a single Scalar.
+    Single(WrappingRange),
+
+    /// Valid ranges for a ScalarPair.
+    Pair(WrappingRange, WrappingRange),
+}
+
+impl ScalarRanges {
+    /// Optionally returns the valid range for a single Scalar.
+    pub fn single(self) -> Option<WrappingRange> {
+        match self {
+            ScalarRanges::Single(valid_range) => Some(valid_range),
+            ScalarRanges::Pair(_, _) => None,
+        }
+    }
+
+    /// Optionally returns a tuple of valid ranges for a ScalarPair.
+    pub fn pair(self) -> Option<(WrappingRange, WrappingRange)> {
+        match self {
+            ScalarRanges::Single(_) => None,
+            ScalarRanges::Pair(s1, s2) => Some((s1, s2)),
+        }
+    }
+}
+
 /// Describes how values of the type are passed by target ABIs,
 /// in terms of categories of C types there are ABI rules for.
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
@@ -1088,6 +1116,17 @@ impl Abi {
     pub fn is_scalar(&self) -> bool {
         matches!(*self, Abi::Scalar(_))
     }
+
+    /// Returns the range(s) of valid values if this is a `Scalar` or `ScalarPair` type.
+    pub fn scalar_valid_range(&self, cx: &impl HasDataLayout) -> Option<ScalarRanges> {
+        match self {
+            Abi::Scalar(scalar) => Some(ScalarRanges::Single(scalar.valid_range(cx))),
+            Abi::ScalarPair(s1, s2) => {
+                Some(ScalarRanges::Pair(s1.valid_range(cx), s2.valid_range(cx)))
+            }
+            _ => None,
+        }
+    }
 }

 rustc_index::newtype_index! {
diff --git a/src/test/codegen/scalar-ranges-newtypes.rs b/src/test/codegen/scalar-ranges-newtypes.rs
new file mode 100644
index 0000000000000..e715bdd1bd1ce
--- /dev/null
+++ b/src/test/codegen/scalar-ranges-newtypes.rs
@@ -0,0 +1,57 @@
+#![crate_type = "lib"]
+#![feature(rustc_attrs, bench_black_box)]
+
+#[rustc_layout_scalar_valid_range_start(1)]
+pub struct NonNull1(*const ());
+
+#[rustc_layout_scalar_valid_range_start(1)]
+pub struct NonNull2 {
+    ptr: *const (),
+}
+
+#[rustc_layout_scalar_valid_range_start(1)]
+pub struct NonNull3 {
+    _marker: std::marker::PhantomData<()>,
+    ptr: *const (),
+}
+
+trait Foo {}
+
+#[rustc_layout_scalar_valid_range_start(1)]
+pub struct NonNull4(*const dyn Foo);
+
+// CHECK: define void @test_nonnull_load
+#[no_mangle]
+pub fn test_nonnull_load(p1: &NonNull1, p2: &NonNull2, p3: &NonNull3, p4: &NonNull4) {
+    // CHECK: %[[P1:[0-9]+]] = bitcast i8** %p1 to {}**
+    // CHECK: load {}*, {}** %[[P1]], align 8, !nonnull
+    std::hint::black_box(p1.0);
+
+    // CHECK: %[[P2:[0-9]+]] = bitcast i8** %p2 to {}**
+    // CHECK: load {}*, {}** %[[P2]], align 8, !nonnull
+    std::hint::black_box(p2.ptr);
+
+    // CHECK: %[[P3:[0-9]+]] = bitcast i8** %p3 to {}**
+    // CHECK: load {}*, {}** %[[P3]], align 8, !nonnull
+    std::hint::black_box(p3.ptr);
+
+    // CHECK: %[[P4_PTR:[0-9]+]] = bitcast { i8*, i64* }* %p4 to {}**
+    // CHECK: load {}*, {}** %[[P4_PTR]], align 8, !nonnull
+    // CHECK: %[[P4_VTABLE:[0-9]+]] = getelementptr inbounds { i8*, i64* }, { i8*, i64* }* %p4, i64 0, i32 1
+    // CHECK: %[[P4_VTABLE_PTR:[0-9]+]] = bitcast i64** %[[P4_VTABLE]] to [3 x i64]**
+    // CHECK: load [3 x i64]*, [3 x i64]** %[[P4_VTABLE_PTR]], align 8, !nonnull
+    std::hint::black_box(p4.0);
+}
+
+#[rustc_layout_scalar_valid_range_start(16)]
+#[rustc_layout_scalar_valid_range_end(2032)]
+pub struct Range(i32);
+
+// CHECK: define void @test_range_load
+#[no_mangle]
+pub fn test_range_load(p: &Range) {
+    // CHECK: load i32, i32* %{{.*}}, align 4, !range ![[RANGE:[0-9]+]]
+    std::hint::black_box(p.0);
+}
+
+// CHECK: ![[RANGE]] = !{i32 16, i32 2033}
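
Note (not part of the patch): as a quick orientation for reviewers, below is a minimal, hypothetical sketch of how backend code can consume the new Abi::scalar_valid_range helper and the ScalarRanges accessors added to rustc_target::abi above. The component_ranges function and its parameter names are placeholders for illustration only; the real call sites are the load_operand hunks in the LLVM and GCC builders, which thread the ranges through to scalar_load_metadata.

use rustc_target::abi::{Abi, HasDataLayout, ScalarRanges, WrappingRange};

// Illustrative helper: split a layout's ABI into per-component valid ranges,
// one per scalar that will actually be loaded.
fn component_ranges(
    abi: &Abi,
    cx: &impl HasDataLayout,
) -> (Option<WrappingRange>, Option<WrappingRange>) {
    match abi.scalar_valid_range(cx) {
        // Single scalar: one range, attachable as !range / !nonnull metadata.
        Some(ScalarRanges::Single(vr)) => (Some(vr), None),
        // Scalar pair: one range per half, matching the two loads of the pair.
        Some(ScalarRanges::Pair(a, b)) => (Some(a), Some(b)),
        // Aggregates, vectors, uninhabited types: nothing to attach.
        None => (None, None),
    }
}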