diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 0fda1473f6488..634e30a9f7a25 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -285,6 +285,31 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
 
+        let largest_niche_index = if matches!(kind, StructKind::Prefixed{..}) || repr.hide_niche() {
+            None
+        } else {
+            fields
+                .iter()
+                .enumerate()
+                .filter_map(|(i, &field)| field.largest_niche.as_ref().map(|n| (i, n)))
+                .max_by_key(|(i, niche)| {
+                    (
+                        niche.available(dl),
+                        // Prefer niches that occur earlier in their respective field, to maximize space after the niche.
+                        cmp::Reverse(niche.offset),
+                        // Prefer fields that occur earlier in the struct, to avoid reordering fields unnecessarily.
+                        cmp::Reverse(*i),
+                    )
+                })
+                .map(|(i, _)| i as u32)
+        };
+
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_mapping`).
         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
 
         let optimize = !repr.inhibit_struct_field_reordering_opt();
@@ -298,10 +323,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             match kind {
                 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                     optimizing.sort_by_key(|&x| {
-                        // Place ZSTs first to avoid "interesting offsets",
-                        // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
-                        (!f.is_zst(), cmp::Reverse(field_align(f)))
+                        (
+                            // Place ZSTs first to avoid "interesting offsets",
+                            // especially with only one or two non-ZST fields.
+                            !f.is_zst(),
+                            cmp::Reverse(field_align(f)),
+                            // Try to put the largest niche earlier.
+                            Some(x) != largest_niche_index,
+                        )
                     });
                 }
                 StructKind::Prefixed(..) => {
@@ -310,20 +340,29 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
-        }
-
-        // inverse_memory_index holds field indices by increasing memory offset.
-        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-        // We now write field offsets to the corresponding offset slot;
-        // field 5 with offset 0 puts 0 in offsets[5].
-        // At the bottom of this function, we invert `inverse_memory_index` to
-        // produce `memory_index` (see `invert_mapping`).
+
+            // Rotate index array to put the largest niche first. Then reverse the ones with larger
+            // alignment. Since it is already the first amongst the types with the same alignment,
+            // this will just move some of the potential padding within the structure.
+            if let (Some(niche_index), StructKind::AlwaysSized) = (largest_niche_index, kind) {
+                // ZSTs are always first, and the largest niche is not one, so we can unwrap.
+                let first_non_zst = inverse_memory_index
+                    .iter()
+                    .position(|&x| !fields[x as usize].is_zst())
+                    .unwrap();
+                let non_zsts = &mut inverse_memory_index[first_non_zst..];
+                let pivot = non_zsts.iter().position(|&x| x == niche_index).unwrap();
+                non_zsts.rotate_left(pivot);
+                let pivot = non_zsts.len() - pivot;
+                non_zsts[pivot..].reverse();
+                debug_assert_eq!(non_zsts[0], niche_index);
+            }
+        }
 
         let mut sized = true;
         let mut offsets = vec![Size::ZERO; fields.len()];
         let mut offset = Size::ZERO;
         let mut largest_niche = None;
-        let mut largest_niche_available = 0;
 
         if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
             let prefix_align =
@@ -354,15 +393,10 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             debug!("univariant offset: {:?} field: {:#?}", offset, field);
             offsets[i as usize] = offset;
 
-            if !repr.hide_niche() {
-                if let Some(mut niche) = field.largest_niche.clone() {
-                    let available = niche.available(dl);
-                    if available > largest_niche_available {
-                        largest_niche_available = available;
-                        niche.offset += offset;
-                        largest_niche = Some(niche);
-                    }
-                }
+            if largest_niche_index == Some(i) {
+                let mut niche = field.largest_niche.clone().unwrap();
+                niche.offset += offset;
+                largest_niche = Some(niche)
             }
 
             offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
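What the reordering buys, in user terms: `bool` admits only the bit patterns 0 and 1, so a `bool` field supplies a niche, and once the niche-bearing field is placed first the enum tag can hide in it while the other variants' fields are laid out after the niche byte. A standalone sketch (not part of the patch) mirroring the `E1`/`S` case in the updated `padding.stdout` expectations below:

```rust
use std::mem::size_of;

// `a` supplies the largest niche; the new sort key moves it to offset 0,
// leaving bytes 1..8 of `S` usable by an enclosing enum's other variants.
struct S {
    a: bool,
    b: bool,
    g: i32,
}

enum E {
    A(i32, i8),
    B(S),
}

fn main() {
    assert_eq!(size_of::<S>(), 8);
    // The tag is encoded in the niche of `S.a` and `A`'s fields avoid that
    // byte, so the enum costs no more than its largest variant (down from
    // 12 bytes before this change, per the padding.stdout diff).
    assert_eq!(size_of::<E>(), 8);
}
```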
diff --git a/src/test/ui/consts/const-eval/const_transmute.rs b/src/test/ui/consts/const-eval/const_transmute.rs
index f0e1d8263022b..59d53c3095120 100644
--- a/src/test/ui/consts/const-eval/const_transmute.rs
+++ b/src/test/ui/consts/const-eval/const_transmute.rs
@@ -33,8 +33,10 @@ impl Drop for Foo {
 }
 
 #[derive(Copy, Clone)]
+#[repr(C)]
 struct Fat<'a>(&'a Foo, &'static VTable);
 
+#[repr(C)]
 struct VTable {
     drop: Option<for<'a> fn(&'a mut Foo)>,
     size: usize,
diff --git a/src/test/ui/print_type_sizes/padding.stdout b/src/test/ui/print_type_sizes/padding.stdout
index 9afdf76245df7..0071ef71146fd 100644
--- a/src/test/ui/print_type_sizes/padding.stdout
+++ b/src/test/ui/print_type_sizes/padding.stdout
@@ -1,23 +1,21 @@
-print-type-size type: `E1`: 12 bytes, alignment: 4 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `B`: 11 bytes
-print-type-size         padding: 3 bytes
-print-type-size         field `.0`: 8 bytes, alignment: 4 bytes
-print-type-size     variant `A`: 7 bytes
-print-type-size         field `.1`: 1 bytes
+print-type-size type: `E1`: 8 bytes, alignment: 4 bytes
+print-type-size     variant `A`: 8 bytes
+print-type-size         padding: 1 bytes
+print-type-size         field `.1`: 1 bytes, alignment: 1 bytes
 print-type-size         padding: 2 bytes
 print-type-size         field `.0`: 4 bytes, alignment: 4 bytes
-print-type-size type: `E2`: 12 bytes, alignment: 4 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `B`: 11 bytes
-print-type-size         padding: 3 bytes
-print-type-size         field `.0`: 8 bytes, alignment: 4 bytes
-print-type-size     variant `A`: 7 bytes
-print-type-size         field `.0`: 1 bytes
+print-type-size     variant `B`: 8 bytes
+print-type-size         field `.0`: 8 bytes
+print-type-size type: `E2`: 8 bytes, alignment: 4 bytes
+print-type-size     variant `A`: 8 bytes
+print-type-size         padding: 1 bytes
+print-type-size         field `.0`: 1 bytes, alignment: 1 bytes
 print-type-size         padding: 2 bytes
 print-type-size         field `.1`: 4 bytes, alignment: 4 bytes
+print-type-size     variant `B`: 8 bytes
+print-type-size         field `.0`: 8 bytes
 print-type-size type: `S`: 8 bytes, alignment: 4 bytes
-print-type-size     field `.g`: 4 bytes
 print-type-size     field `.a`: 1 bytes
 print-type-size     field `.b`: 1 bytes
-print-type-size     end padding: 2 bytes
+print-type-size     padding: 2 bytes
+print-type-size     field `.g`: 4 bytes, alignment: 4 bytes
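The two `#[repr(C)]` additions pin the test's layouts down: `const_transmute.rs` transmutes between types whose field layouts must line up exactly, and under the default `repr(Rust)` the compiler is free to reorder fields, now including moving a niche-bearing field first. A standalone sketch (hypothetical types, not from the test) of why such code cannot rely on declaration order without `repr(C)`:

```rust
use std::mem::size_of;

// Under `repr(Rust)` the compiler may reorder fields, e.g. to pack them
// or to move a niche first; the in-memory order is unspecified.
struct DefaultRepr(u8, u32, u8);

// `repr(C)` fixes declaration order: u8, 3 bytes of padding, u32, u8,
// 3 bytes of tail padding.
#[repr(C)]
struct CRepr(u8, u32, u8);

fn main() {
    // On current rustc the default repr packs the fields into 8 bytes...
    assert_eq!(size_of::<DefaultRepr>(), 8);
    // ...while the C layout needs 12, so transmuting between the two
    // (or hardcoding offsets) would read the wrong bytes.
    assert_eq!(size_of::<CRepr>(), 12);
}
```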
diff --git a/src/test/ui/type-sizes.rs b/src/test/ui/type-sizes.rs
index 73a11a5e743f6..68da65884dbcf 100644
--- a/src/test/ui/type-sizes.rs
+++ b/src/test/ui/type-sizes.rs
@@ -6,6 +6,7 @@
 
 use std::mem::size_of;
 use std::num::NonZeroU8;
+use std::ptr::NonNull;
 
 struct t {a: u8, b: i8}
 struct u {a: u8, b: i8, c: u8}
@@ -120,6 +121,60 @@ pub enum AlwaysTaggedBecauseItHasNoNiche {
     B
 }
 
+struct BoolInTheMiddle(std::num::NonZeroU16, bool, u8);
+
+enum NicheWithData {
+    A,
+    B([u16; 5]),
+    Largest { a1: u32, a2: BoolInTheMiddle, a3: u32 },
+    C,
+    D(u32, u32),
+}
+
+// A type with almost 2^16 invalid values.
+#[repr(u16)]
+pub enum NicheU16 {
+    _0,
+}
+
+pub enum EnumManyVariant<X> {
+    Dataful(u8, X),
+
+    // 0x100 niche variants.
+    _00, _01, _02, _03, _04, _05, _06, _07, _08, _09, _0A, _0B, _0C, _0D, _0E, _0F,
+    _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _1A, _1B, _1C, _1D, _1E, _1F,
+    _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _2A, _2B, _2C, _2D, _2E, _2F,
+    _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _3A, _3B, _3C, _3D, _3E, _3F,
+    _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _4A, _4B, _4C, _4D, _4E, _4F,
+    _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _5A, _5B, _5C, _5D, _5E, _5F,
+    _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _6A, _6B, _6C, _6D, _6E, _6F,
+    _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _7A, _7B, _7C, _7D, _7E, _7F,
+    _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _8A, _8B, _8C, _8D, _8E, _8F,
+    _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, _9A, _9B, _9C, _9D, _9E, _9F,
+    _A0, _A1, _A2, _A3, _A4, _A5, _A6, _A7, _A8, _A9, _AA, _AB, _AC, _AD, _AE, _AF,
+    _B0, _B1, _B2, _B3, _B4, _B5, _B6, _B7, _B8, _B9, _BA, _BB, _BC, _BD, _BE, _BF,
+    _C0, _C1, _C2, _C3, _C4, _C5, _C6, _C7, _C8, _C9, _CA, _CB, _CC, _CD, _CE, _CF,
+    _D0, _D1, _D2, _D3, _D4, _D5, _D6, _D7, _D8, _D9, _DA, _DB, _DC, _DD, _DE, _DF,
+    _E0, _E1, _E2, _E3, _E4, _E5, _E6, _E7, _E8, _E9, _EA, _EB, _EC, _ED, _EE, _EF,
+    _F0, _F1, _F2, _F3, _F4, _F5, _F6, _F7, _F8, _F9, _FA, _FB, _FC, _FD, _FE, _FF,
+}
+
+// Discriminant should fit in the niche of the pointer to `Unsized` (#66029)
+type Sized = usize;
+type Unsized = [usize];
+enum NicheFillingWithBox {
+    Sized(Box<Sized>),
+    Unsized(Box<Unsized>),
+}
+enum NicheFillingWithUnitPtr {
+    Sized(/* 0usize */ NonNull<()>),
+    Unsized(NonNull<()>, usize),
+}
+struct ManuallyNiched {
+    a: usize,
+    b: usize,
+}
+
 pub fn main() {
     assert_eq!(size_of::<u8>(), 1 as usize);
     assert_eq!(size_of::<u32>(), 4 as usize);
@@ -170,4 +225,34 @@ pub fn main() {
     assert_eq!(size_of::<AlwaysTaggedBecauseItHasNoNiche>(), 8);
     assert_eq!(size_of::<Option<AlwaysTaggedBecauseItHasNoNiche>>(), 8);
     assert_eq!(size_of::<Option<Option<AlwaysTaggedBecauseItHasNoNiche>>>(), 8);
+
+    struct S1 { a: u16, b: std::num::NonZeroU16, c: u16, d: u8, e: u32, f: u64, g: [u8; 2] }
+    assert_eq!(size_of::<S1>(), 24);
+    assert_eq!(size_of::<Option<S1>>(), 24);
+
+    assert_eq!(size_of::<NicheWithData>(), 12);
+    assert_eq!(size_of::<Option<NicheWithData>>(), 12);
+    assert_eq!(size_of::<Option<Option<NicheWithData>>>(), 12);
+    assert_eq!(
+        size_of::<Option<Option2<&(), Option<NicheWithData>>>>(),
+        size_of::<(&(), NicheWithData)>()
+    );
+
+    pub enum FillPadding { A(std::num::NonZeroU8, u32), B }
+    assert_eq!(size_of::<FillPadding>(), 8);
+    assert_eq!(size_of::<Option<FillPadding>>(), 8);
+    assert_eq!(size_of::<Option<Option<FillPadding>>>(), 8);
+
+    assert_eq!(size_of::<Option2<NicheU16, u8>>(), 4);
+    assert_eq!(size_of::<Option<Option2<NicheU16, u8>>>(), 4);
+    assert_eq!(size_of::<Option<(NicheU16, u8)>>(), 4);
+
+    assert_eq!(size_of::<EnumManyVariant<u16>>(), 6);
+    assert_eq!(size_of::<EnumManyVariant<NicheU16>>(), 4);
+    assert_eq!(size_of::<EnumManyVariant<Option<NicheU16>>>(), 4);
+    assert_eq!(size_of::<EnumManyVariant<Option2<NicheU16, u8>>>(), 6);
+    assert_eq!(size_of::<EnumManyVariant<Option<(NicheU16, u8)>>>(), 6);
+
+    assert_eq!(size_of::<NicheFillingWithBox>(), size_of::<ManuallyNiched>());
+    assert_eq!(size_of::<NicheFillingWithUnitPtr>(), size_of::<ManuallyNiched>());
 }
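Background for the `NicheFillingWithBox`/`NicheFillingWithUnitPtr` tests (#66029): the enum discriminant is expected to fit in the niche of the pointer, so each enum should cost no more than the hand-rolled two-word struct. A standalone sketch (independent of this patch) of the pointer-niche behavior those tests build on:

```rust
use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // Null is not a valid value for `NonNull` or `Box`, so `None` can be
    // encoded as the all-zero pattern: the `Option` adds no space.
    assert_eq!(size_of::<Option<NonNull<()>>>(), size_of::<usize>());
    assert_eq!(size_of::<Option<Box<usize>>>(), size_of::<usize>());

    // A fat pointer still provides the niche through its data pointer; the
    // metadata word (here, a slice length) rides alongside unchanged.
    assert_eq!(size_of::<Option<Box<[usize]>>>(), 2 * size_of::<usize>());
}
```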