From ad758f885339d1694a7d1f580c5823f0c3bf8da7 Mon Sep 17 00:00:00 2001
From: Olivier Goffart
Date: Thu, 26 Mar 2020 09:22:17 +0100
Subject: [PATCH 1/2] Use the niche optimisation if other enum variants are
 small enough

* Put the largest niche first
* Add test from issue 63866
* Add test for enum of sized/unsized
* Prefer fields that are already earlier in the struct
---
 compiler/rustc_middle/src/ty/layout.rs      | 183 +++++++++++++-----
 compiler/rustc_mir_build/src/thir/mod.rs    |   2 +-
 .../ui/consts/const-eval/const_transmute.rs |   2 +
 src/test/ui/print_type_sizes/padding.stdout |  30 ++-
 src/test/ui/type-sizes.rs                   |  85 ++++++++
 5 files changed, 237 insertions(+), 65 deletions(-)

diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 0fda1473f6488..b0fba77360795 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -285,6 +285,31 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
 
+        let largest_niche_index = if matches!(kind, StructKind::Prefixed{..}) || repr.hide_niche() {
+            None
+        } else {
+            fields
+                .iter()
+                .enumerate()
+                .filter_map(|(i, &field)| field.largest_niche.as_ref().map(|n| (i, n)))
+                .max_by_key(|(i, niche)| {
+                    (
+                        niche.available(dl),
+                        // Prefer niches that occur earlier in their respective field, to maximize space after the niche.
+                        cmp::Reverse(niche.offset),
+                        // Prefer fields that occur earlier in the struct, to avoid reordering fields unnecessarily.
+                        cmp::Reverse(*i),
+                    )
+                })
+                .map(|(i, _)| i as u32)
+        };
+
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_mapping`).
         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
 
         let optimize = !repr.inhibit_struct_field_reordering_opt();
 
@@ -298,10 +323,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             match kind {
                 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                     optimizing.sort_by_key(|&x| {
-                        // Place ZSTs first to avoid "interesting offsets",
-                        // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
-                        (!f.is_zst(), cmp::Reverse(field_align(f)))
+                        (
+                            // Place ZSTs first to avoid "interesting offsets",
+                            // especially with only one or two non-ZST fields.
+                            !f.is_zst(),
+                            cmp::Reverse(field_align(f)),
+                            // Try to put the largest niche earlier.
+                            Some(x) != largest_niche_index,
+                        )
                     });
                 }
                 StructKind::Prefixed(..) => {
@@ -310,20 +340,29 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
-        }
 
-        // inverse_memory_index holds field indices by increasing memory offset.
-        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-        // We now write field offsets to the corresponding offset slot;
-        // field 5 with offset 0 puts 0 in offsets[5].
-        // At the bottom of this function, we invert `inverse_memory_index` to
-        // produce `memory_index` (see `invert_mapping`).
+            // Rotate index array to put the largest niche first. Then reverse the ones with larger
+            // alignment. Since it is already the first amongst the types with the same alignment,
+            // this will just move some of the potential padding within the structure.
+            if let (Some(niche_index), StructKind::AlwaysSized) = (largest_niche_index, kind) {
+                // ZSTs are always first, and the largest niche is not one, so we can unwrap
+                let first_non_zst = inverse_memory_index
+                    .iter()
+                    .position(|&x| !fields[x as usize].is_zst())
+                    .unwrap();
+                let non_zsts = &mut inverse_memory_index[first_non_zst..];
+                let pivot = non_zsts.iter().position(|&x| x == niche_index).unwrap();
+                non_zsts.rotate_left(pivot);
+                let pivot = non_zsts.len() - pivot;
+                non_zsts[pivot..].reverse();
+                debug_assert_eq!(non_zsts[0], niche_index);
+            }
+        }
 
         let mut sized = true;
         let mut offsets = vec![Size::ZERO; fields.len()];
         let mut offset = Size::ZERO;
         let mut largest_niche = None;
-        let mut largest_niche_available = 0;
 
         if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
             let prefix_align =
@@ -354,15 +393,10 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             debug!("univariant offset: {:?} field: {:#?}", offset, field);
             offsets[i as usize] = offset;
 
-            if !repr.hide_niche() {
-                if let Some(mut niche) = field.largest_niche.clone() {
-                    let available = niche.available(dl);
-                    if available > largest_niche_available {
-                        largest_niche_available = available;
-                        niche.offset += offset;
-                        largest_niche = Some(niche);
-                    }
-                }
+            if largest_niche_index == Some(i) {
+                let mut niche = field.largest_niche.clone().unwrap();
+                niche.offset += offset;
+                largest_niche = Some(niche)
             }
 
             offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
@@ -864,48 +898,80 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                 let mut dataful_variant = None;
                 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
+                let mut max_size = Size::ZERO;
+                let mut second_max_size = Size::ZERO;
+                let mut align = dl.aggregate_align;
+
+                // The size computations below assume that the padding is minimal.
+                // This is the case when fields are re-ordered.
+                let struct_reordering_opt = !def.repr.inhibit_struct_field_reordering_opt();
+
+                let mut extend_niche_range = |d| {
+                    niche_variants =
+                        *niche_variants.start().min(&d)..=*niche_variants.end().max(&d);
+                };
 
-                // Find one non-ZST variant.
-                'variants: for (v, fields) in variants.iter_enumerated() {
+                // Find the largest and second largest variant.
+                for (v, fields) in variants.iter_enumerated() {
                     if absent(fields) {
-                        continue 'variants;
+                        continue;
                     }
-                    for f in fields {
-                        if !f.is_zst() {
-                            if dataful_variant.is_none() {
-                                dataful_variant = Some(v);
-                                continue 'variants;
-                            } else {
-                                dataful_variant = None;
-                                break 'variants;
-                            }
+                    let mut size = Size::ZERO;
+                    for &f in fields {
+                        align = align.max(f.align);
+                        size += f.size;
+                    }
+                    if size > max_size {
+                        second_max_size = max_size;
+                        max_size = size;
+                        if let Some(d) = dataful_variant {
+                            extend_niche_range(d);
                         }
+                        dataful_variant = Some(v);
+                    } else if size == max_size {
+                        if let Some(d) = dataful_variant {
+                            extend_niche_range(d);
+                        }
+                        dataful_variant = None;
+                        extend_niche_range(v);
+                    } else {
+                        second_max_size = second_max_size.max(size);
+                        extend_niche_range(v);
                     }
-                    niche_variants = *niche_variants.start().min(&v)..=v;
                 }
 
                 if niche_variants.start() > niche_variants.end() {
                     dataful_variant = None;
                 }
 
-                if let Some(i) = dataful_variant {
+                if let Some(dataful_variant) = dataful_variant {
                     let count = (niche_variants.end().as_u32()
                         - niche_variants.start().as_u32()
                         + 1) as u128;
 
                     // Find the field with the largest niche
-                    let niche_candidate = variants[i]
+                    let niche_candidate = variants[dataful_variant]
                         .iter()
                         .enumerate()
                         .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
-                        .max_by_key(|(_, niche)| niche.available(dl));
+                        .max_by_key(|(_, n)| (n.available(dl), cmp::Reverse(n.offset)))
+                        .and_then(|(field_index, niche)| {
+                            if !struct_reordering_opt && second_max_size > Size::ZERO {
+                                return None;
+                            }
+                            // make sure there is enough room for the other variants
+                            if max_size - (niche.offset + niche.scalar.value.size(dl))
+                                < second_max_size
+                            {
+                                return None;
+                            }
+                            Some((field_index, niche, niche.reserve(self, count)?))
+                        });
 
                     if let Some((field_index, niche, (niche_start, niche_scalar))) =
-                        niche_candidate.and_then(|(field_index, niche)| {
-                            Some((field_index, niche, niche.reserve(self, count)?))
-                        })
+                        niche_candidate
                     {
-                        let mut align = dl.aggregate_align;
+                        let prefix = niche.offset + niche.scalar.value.size(dl);
                         let st = variants
                             .iter_enumerated()
                             .map(|(j, v)| {
@@ -913,23 +979,42 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                     ty,
                                     v,
                                     &def.repr,
-                                    StructKind::AlwaysSized,
+                                    if j == dataful_variant || second_max_size == Size::ZERO {
+                                        StructKind::AlwaysSized
+                                    } else {
+                                        StructKind::Prefixed(
+                                            prefix,
+                                            Align::from_bytes(1).unwrap(),
+                                        )
+                                    },
                                 )?;
                                 st.variants = Variants::Single { index: j };
-                                align = align.max(st.align);
-
+                                debug_assert_eq!(align, align.max(st.align));
                                 Ok(st)
                             })
                             .collect::<Result<Vec<_>, _>>()?;
 
-                        let offset = st[i].fields.offset(field_index) + niche.offset;
-                        let size = st[i].size;
+                        let offset = if struct_reordering_opt {
+                            debug_assert_eq!(
+                                st[dataful_variant].fields.offset(field_index),
+                                Size::ZERO
+                            );
+                            niche.offset
+                        } else {
+                            st[dataful_variant].fields.offset(field_index) + niche.offset
+                        };
+
+                        let size = st[dataful_variant].size.align_to(align.abi);
+                        debug_assert!(
+                            !struct_reordering_opt || size == max_size.align_to(align.abi)
+                        );
+                        debug_assert!(st.iter().all(|v| v.size <= size));
 
                         let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                             Abi::Uninhabited
-                        } else {
-                            match st[i].abi {
+                        } else if second_max_size == Size::ZERO {
+                            match st[dataful_variant].abi {
                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                 Abi::ScalarPair(ref first, ref second) => {
                                     // We need to use scalar_unit to reset the
@@ -951,6 +1036,8 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                 }
                                 _ => Abi::Aggregate { sized: true },
                             }
+                        } else {
+                            Abi::Aggregate { sized: true }
                         };
 
                         let largest_niche =
@@ -960,7 +1047,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                             variants: Variants::Multiple {
                                 tag: niche_scalar,
                                 tag_encoding: TagEncoding::Niche {
-                                    dataful_variant: i,
+                                    dataful_variant,
                                     niche_variants,
                                     niche_start,
                                 },
diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs
index 4d57fd5c64f8d..e7ec976dc314b 100644
--- a/compiler/rustc_mir_build/src/thir/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/mod.rs
@@ -97,7 +97,7 @@ crate enum StmtKind<'tcx> {
 
 // `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
 #[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr<'_>, 168);
+rustc_data_structures::static_assert_size!(Expr<'_>, if cfg!(bootstrap) { 168 } else { 160 });
 
 /// The Thir trait implementor lowers their expressions (`&'tcx H::Expr`)
 /// into instances of this `Expr` enum. This lowering can be done
diff --git a/src/test/ui/consts/const-eval/const_transmute.rs b/src/test/ui/consts/const-eval/const_transmute.rs
index f0e1d8263022b..59d53c3095120 100644
--- a/src/test/ui/consts/const-eval/const_transmute.rs
+++ b/src/test/ui/consts/const-eval/const_transmute.rs
@@ -33,8 +33,10 @@ impl Drop for Foo {
 }
 
 #[derive(Copy, Clone)]
+#[repr(C)]
 struct Fat<'a>(&'a Foo, &'static VTable);
 
+#[repr(C)]
 struct VTable {
     drop: Option<for<'a> fn(&'a mut Foo)>,
     size: usize,
diff --git a/src/test/ui/print_type_sizes/padding.stdout b/src/test/ui/print_type_sizes/padding.stdout
index 9afdf76245df7..0071ef71146fd 100644
--- a/src/test/ui/print_type_sizes/padding.stdout
+++ b/src/test/ui/print_type_sizes/padding.stdout
@@ -1,23 +1,21 @@
-print-type-size type: `E1`: 12 bytes, alignment: 4 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `B`: 11 bytes
-print-type-size         padding: 3 bytes
-print-type-size         field `.0`: 8 bytes, alignment: 4 bytes
-print-type-size     variant `A`: 7 bytes
-print-type-size         field `.1`: 1 bytes
+print-type-size type: `E1`: 8 bytes, alignment: 4 bytes
+print-type-size     variant `A`: 8 bytes
+print-type-size         padding: 1 bytes
+print-type-size         field `.1`: 1 bytes, alignment: 1 bytes
 print-type-size         padding: 2 bytes
 print-type-size         field `.0`: 4 bytes, alignment: 4 bytes
-print-type-size type: `E2`: 12 bytes, alignment: 4 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `B`: 11 bytes
-print-type-size         padding: 3 bytes
-print-type-size         field `.0`: 8 bytes, alignment: 4 bytes
-print-type-size     variant `A`: 7 bytes
-print-type-size         field `.0`: 1 bytes
+print-type-size     variant `B`: 8 bytes
+print-type-size         field `.0`: 8 bytes
+print-type-size type: `E2`: 8 bytes, alignment: 4 bytes
+print-type-size     variant `A`: 8 bytes
+print-type-size         padding: 1 bytes
+print-type-size         field `.0`: 1 bytes, alignment: 1 bytes
 print-type-size         padding: 2 bytes
 print-type-size         field `.1`: 4 bytes, alignment: 4 bytes
+print-type-size     variant `B`: 8 bytes
+print-type-size         field `.0`: 8 bytes
 print-type-size type: `S`: 8 bytes, alignment: 4 bytes
-print-type-size     field `.g`: 4 bytes
 print-type-size     field `.a`: 1 bytes
 print-type-size     field `.b`: 1 bytes
-print-type-size     end padding: 2 bytes
+print-type-size     padding: 2 bytes
+print-type-size     field `.g`: 4 bytes, alignment: 4 bytes
diff --git a/src/test/ui/type-sizes.rs b/src/test/ui/type-sizes.rs
index 73a11a5e743f6..68da65884dbcf 100644
--- a/src/test/ui/type-sizes.rs
+++ b/src/test/ui/type-sizes.rs
@@ -6,6 +6,7 @@
 
 use std::mem::size_of;
 use std::num::NonZeroU8;
+use std::ptr::NonNull;
 
 struct t {a: u8, b: i8}
 struct u {a: u8, b: i8, c: u8}
@@ -120,6 +121,60 @@ pub enum AlwaysTaggedBecauseItHasNoNiche {
     B
 }
 
+struct BoolInTheMiddle(std::num::NonZeroU16, bool, u8);
+
+enum NicheWithData {
+    A,
+    B([u16; 5]),
+    Largest { a1: u32, a2: BoolInTheMiddle, a3: u32 },
+    C,
+    D(u32, u32),
+}
+
+// A type with almost 2^16 invalid values.
+#[repr(u16)]
+pub enum NicheU16 {
+    _0,
+}
+
+pub enum EnumManyVariant<X> {
+    Dataful(u8, X),
+
+    // 0x100 niche variants.
+    _00, _01, _02, _03, _04, _05, _06, _07, _08, _09, _0A, _0B, _0C, _0D, _0E, _0F,
+    _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _1A, _1B, _1C, _1D, _1E, _1F,
+    _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _2A, _2B, _2C, _2D, _2E, _2F,
+    _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _3A, _3B, _3C, _3D, _3E, _3F,
+    _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _4A, _4B, _4C, _4D, _4E, _4F,
+    _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _5A, _5B, _5C, _5D, _5E, _5F,
+    _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _6A, _6B, _6C, _6D, _6E, _6F,
+    _70, _71, _72, _73, _74, _75, _76, _77, _78, _79, _7A, _7B, _7C, _7D, _7E, _7F,
+    _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _8A, _8B, _8C, _8D, _8E, _8F,
+    _90, _91, _92, _93, _94, _95, _96, _97, _98, _99, _9A, _9B, _9C, _9D, _9E, _9F,
+    _A0, _A1, _A2, _A3, _A4, _A5, _A6, _A7, _A8, _A9, _AA, _AB, _AC, _AD, _AE, _AF,
+    _B0, _B1, _B2, _B3, _B4, _B5, _B6, _B7, _B8, _B9, _BA, _BB, _BC, _BD, _BE, _BF,
+    _C0, _C1, _C2, _C3, _C4, _C5, _C6, _C7, _C8, _C9, _CA, _CB, _CC, _CD, _CE, _CF,
+    _D0, _D1, _D2, _D3, _D4, _D5, _D6, _D7, _D8, _D9, _DA, _DB, _DC, _DD, _DE, _DF,
+    _E0, _E1, _E2, _E3, _E4, _E5, _E6, _E7, _E8, _E9, _EA, _EB, _EC, _ED, _EE, _EF,
+    _F0, _F1, _F2, _F3, _F4, _F5, _F6, _F7, _F8, _F9, _FA, _FB, _FC, _FD, _FE, _FF,
+}
+
+// Discriminant should fit in the niche of the pointer to `Unsized` (#66029)
+type Sized = usize;
+type Unsized = [usize];
+enum NicheFillingWithBox {
+    Sized(Box<Sized>),
+    Unsized(Box<Unsized>),
+}
+enum NicheFillingWithUnitPtr {
+    Sized(/* 0usize */ NonNull<()>),
+    Unsized(NonNull<()>, usize),
+}
+struct ManuallyNiched {
+    a: usize,
+    b: usize,
+}
+
 pub fn main() {
     assert_eq!(size_of::<u8>(), 1 as usize);
     assert_eq!(size_of::<u32>(), 4 as usize);
@@ -170,4 +225,34 @@ pub fn main() {
     assert_eq!(size_of::<AlwaysTaggedBecauseItHasNoNiche>(), 8);
     assert_eq!(size_of::<Option<AlwaysTaggedBecauseItHasNoNiche>>(), 8);
     assert_eq!(size_of::<Option<Option<AlwaysTaggedBecauseItHasNoNiche>>>(), 8);
+
+    struct S1{ a: u16, b: std::num::NonZeroU16, c: u16, d: u8, e: u32, f: u64, g:[u8;2] }
+    assert_eq!(size_of::<S1>(), 24);
+    assert_eq!(size_of::<Option<S1>>(), 24);
+
+    assert_eq!(size_of::<NicheWithData>(), 12);
+    assert_eq!(size_of::<Option<NicheWithData>>(), 12);
+    assert_eq!(size_of::<Option<Option<NicheWithData>>>(), 12);
+    assert_eq!(
+        size_of::>>>(),
+        size_of::<(&(), NicheWithData)>()
+    );
+
+    pub enum FillPadding { A(std::num::NonZeroU8, u32), B }
+    assert_eq!(size_of::<FillPadding>(), 8);
+    assert_eq!(size_of::<Option<FillPadding>>(), 8);
+    assert_eq!(size_of::<Option<Option<FillPadding>>>(), 8);
+
+    assert_eq!(size_of::>(), 4);
+    assert_eq!(size_of::>>(), 4);
+    assert_eq!(size_of::>(), 4);
+
+    assert_eq!(size_of::>(), 6);
+    assert_eq!(size_of::>(), 4);
+    assert_eq!(size_of::>>(), 4);
+    assert_eq!(size_of::>>(), 6);
+    assert_eq!(size_of::>>(), 6);
+
+    assert_eq!(size_of::<NicheFillingWithBox>(), size_of::<ManuallyNiched>());
+    assert_eq!(size_of::<NicheFillingWithUnitPtr>(), size_of::<ManuallyNiched>());
 }

From d5ef4b61b59826e39143b3248769d759bf37266d Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sat, 26 Sep 2020 20:43:46 -0400
Subject: [PATCH 2/2] TEMP FOR PERF: remove the optimization, leaving only the
 field reordering

---
 compiler/rustc_middle/src/ty/layout.rs   | 109 ++++++-----------
 compiler/rustc_mir_build/src/thir/mod.rs |   2 +-
 2 files changed, 29 insertions(+), 82 deletions(-)

diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index b0fba77360795..634e30a9f7a25 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -898,80 +898,48 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                 let mut dataful_variant = None;
                 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
-                let mut max_size = Size::ZERO;
-                let mut second_max_size = Size::ZERO;
-                let mut align = dl.aggregate_align;
-
-                // The size computations below assume that the padding is minimal.
-                // This is the case when fields are re-ordered.
-                let struct_reordering_opt = !def.repr.inhibit_struct_field_reordering_opt();
-
-                let mut extend_niche_range = |d| {
-                    niche_variants =
-                        *niche_variants.start().min(&d)..=*niche_variants.end().max(&d);
-                };
 
-                // Find the largest and second largest variant.
-                for (v, fields) in variants.iter_enumerated() {
+                // Find one non-ZST variant.
+                'variants: for (v, fields) in variants.iter_enumerated() {
                     if absent(fields) {
-                        continue;
+                        continue 'variants;
                     }
-                    let mut size = Size::ZERO;
-                    for &f in fields {
-                        align = align.max(f.align);
-                        size += f.size;
-                    }
-                    if size > max_size {
-                        second_max_size = max_size;
-                        max_size = size;
-                        if let Some(d) = dataful_variant {
-                            extend_niche_range(d);
-                        }
-                        dataful_variant = Some(v);
-                    } else if size == max_size {
-                        if let Some(d) = dataful_variant {
-                            extend_niche_range(d);
+                    for f in fields {
+                        if !f.is_zst() {
+                            if dataful_variant.is_none() {
+                                dataful_variant = Some(v);
+                                continue 'variants;
+                            } else {
+                                dataful_variant = None;
+                                break 'variants;
+                            }
                         }
-                        dataful_variant = None;
-                        extend_niche_range(v);
-                    } else {
-                        second_max_size = second_max_size.max(size);
-                        extend_niche_range(v);
                     }
+                    niche_variants = *niche_variants.start().min(&v)..=v;
                 }
 
                 if niche_variants.start() > niche_variants.end() {
                     dataful_variant = None;
                 }
 
-                if let Some(dataful_variant) = dataful_variant {
+                if let Some(i) = dataful_variant {
                     let count = (niche_variants.end().as_u32()
                         - niche_variants.start().as_u32()
                         + 1) as u128;
 
                     // Find the field with the largest niche
-                    let niche_candidate = variants[dataful_variant]
+                    let niche_candidate = variants[i]
                         .iter()
                         .enumerate()
                         .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
-                        .max_by_key(|(_, n)| (n.available(dl), cmp::Reverse(n.offset)))
-                        .and_then(|(field_index, niche)| {
-                            if !struct_reordering_opt && second_max_size > Size::ZERO {
-                                return None;
-                            }
-                            // make sure there is enough room for the other variants
-                            if max_size - (niche.offset + niche.scalar.value.size(dl))
-                                < second_max_size
-                            {
-                                return None;
-                            }
-                            Some((field_index, niche, niche.reserve(self, count)?))
-                        });
+                        .max_by_key(|(_, niche)| niche.available(dl));
 
                     if let Some((field_index, niche, (niche_start, niche_scalar))) =
-                        niche_candidate
+                        niche_candidate.and_then(|(field_index, niche)| {
+                            Some((field_index, niche, niche.reserve(self, count)?))
+                        })
                     {
-                        let prefix = niche.offset + niche.scalar.value.size(dl);
+                        let mut align = dl.aggregate_align;
                         let st = variants
                             .iter_enumerated()
                             .map(|(j, v)| {
@@ -979,42 +947,23 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                     ty,
                                     v,
                                     &def.repr,
-                                    if j == dataful_variant || second_max_size == Size::ZERO {
-                                        StructKind::AlwaysSized
-                                    } else {
-                                        StructKind::Prefixed(
-                                            prefix,
-                                            Align::from_bytes(1).unwrap(),
-                                        )
-                                    },
+                                    StructKind::AlwaysSized,
                                 )?;
                                 st.variants = Variants::Single { index: j };
-                                debug_assert_eq!(align, align.max(st.align));
+                                align = align.max(st.align);
+
                                 Ok(st)
                             })
                             .collect::<Result<Vec<_>, _>>()?;
 
-                        let offset = if struct_reordering_opt {
-                            debug_assert_eq!(
-                                st[dataful_variant].fields.offset(field_index),
-                                Size::ZERO
-                            );
-                            niche.offset
-                        } else {
-                            st[dataful_variant].fields.offset(field_index) + niche.offset
-                        };
-
-                        let size = st[dataful_variant].size.align_to(align.abi);
-                        debug_assert!(
-                            !struct_reordering_opt || size == max_size.align_to(align.abi)
-                        );
-                        debug_assert!(st.iter().all(|v| v.size <= size));
+                        let offset = st[i].fields.offset(field_index) + niche.offset;
+                        let size = st[i].size;
 
                         let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                             Abi::Uninhabited
-                        } else if second_max_size == Size::ZERO {
-                            match st[dataful_variant].abi {
+                        } else {
+                            match st[i].abi {
                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                 Abi::ScalarPair(ref first, ref second) => {
                                     // We need to use scalar_unit to reset the
@@ -1036,8 +985,6 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                 }
                                 _ => Abi::Aggregate { sized: true },
                             }
-                        } else {
-                            Abi::Aggregate { sized: true }
                         };
 
                         let largest_niche =
@@ -1047,7 +994,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                             variants: Variants::Multiple {
                                 tag: niche_scalar,
                                 tag_encoding: TagEncoding::Niche {
-                                    dataful_variant,
+                                    dataful_variant: i,
                                     niche_variants,
                                     niche_start,
                                 },
diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs
index e7ec976dc314b..4d57fd5c64f8d 100644
--- a/compiler/rustc_mir_build/src/thir/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/mod.rs
@@ -97,7 +97,7 @@ crate enum StmtKind<'tcx> {
 
 // `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
 #[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr<'_>, if cfg!(bootstrap) { 168 } else { 160 });
+rustc_data_structures::static_assert_size!(Expr<'_>, 168);
 
 /// The Thir trait implementor lowers their expressions (`&'tcx H::Expr`)
 /// into instances of this `Expr` enum. This lowering can be done
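
What the series does, in short: [PATCH 1/2] lets the enum discriminant be encoded in a niche of the largest variant even when the other variants are not zero-sized, provided they fit in the bytes of that variant lying outside the niche (the struct code also rotates the largest niche to the front to maximise that room); [PATCH 2/2] temporarily reverts everything except the struct field reordering so a perf run can measure the reordering alone. The program below is a standalone sketch of the first commit's effect, not part of either patch; it reuses the `NicheWithData` type the patch adds to src/test/ui/type-sizes.rs, and the 12-byte figures in the comments assume a compiler with [PATCH 1/2] applied (and not reverted), matching the new asserts. On a compiler using the old algorithm, such as one built from [PATCH 2/2], both printed sizes are 16.

use std::mem::size_of;

// The niche lives in the `bool`: only the byte values 0 and 1 are valid,
// so the remaining 254 values can encode discriminants.
#[allow(dead_code)]
struct BoolInTheMiddle(std::num::NonZeroU16, bool, u8);

#[allow(dead_code)]
enum NicheWithData {
    A,
    B([u16; 5]),
    // The largest variant (12 bytes) and the one containing the niche.
    Largest { a1: u32, a2: BoolInTheMiddle, a3: u32 },
    C,
    D(u32, u32),
}

fn main() {
    // With the optimisation, every other variant (at most 10 bytes) fits in
    // the bytes of `Largest` outside the one-byte niche, so the discriminant
    // goes in the niche and the enum is no bigger than its largest variant:
    // 12 bytes. The old algorithm adds a separate tag, giving 16.
    println!("NicheWithData: {}", size_of::<NicheWithData>());

    // Wrapping in `Option` is free under either algorithm: `None` simply
    // takes one more of the unused niche values.
    println!("Option<NicheWithData>: {}", size_of::<Option<NicheWithData>>());
}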