diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 6e3a4cae2f62b..a5ffe0650a8ac 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -348,50 +348,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Direct(_) => {
                     // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                     // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
-                    // guarnateeing that we generate ABI-compatible LLVM IR. Things get tricky for
-                    // aggregates...
-                    if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
-                        assert!(
-                            arg.layout.is_sized(),
-                            "`PassMode::Direct` for unsized type: {}",
-                            arg.layout.ty
-                        );
-                        // This really shouldn't happen, since `immediate_llvm_type` will use
-                        // `layout.fields` to turn this Rust type into an LLVM type. This means all
-                        // sorts of Rust type details leak into the ABI. However wasm sadly *does*
-                        // currently use this mode so we have to allow it -- but we absolutely
-                        // shouldn't let any more targets do that.
-                        // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
-                        //
-                        // The unstable abi `PtxKernel` also uses Direct for now.
-                        // It needs to switch to something else before stabilization can happen.
-                        // (See issue: https://github.com/rust-lang/rust/issues/117271)
-                        assert!(
-                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
-                                || self.conv == Conv::PtxKernel,
-                            "`PassMode::Direct` for aggregates only allowed on wasm and `extern \"ptx-kernel\"` fns\nProblematic type: {:#?}",
-                            arg.layout,
-                        );
-                    }
+                    // guaranteeing that we generate ABI-compatible LLVM IR.
                     arg.layout.immediate_llvm_type(cx)
                 }
                 PassMode::Pair(..) => {
                     // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                     // so for ScalarPair we can easily be sure that we are generating ABI-compatible
                     // LLVM IR.
-                    assert!(
-                        matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
-                        "PassMode::Pair for type {}",
-                        arg.layout.ty
-                    );
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
-                    // `Indirect` with metadata is only for unsized types, and doesn't work with
-                    // on-stack passing.
-                    assert!(arg.layout.is_unsized() && !on_stack);
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                     // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                     // Any two ABI-compatible unsized types have the same metadata type and
                     // moreover the same metadata value leads to the same dynamic size and
@@ -402,13 +370,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
-                    assert!(arg.layout.is_sized());
-                    cx.type_ptr()
-                }
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(),
                 PassMode::Cast { cast, pad_i32 } => {
-                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
-                    assert!(arg.layout.is_sized());
                     // add padding
                     if *pad_i32 {
                         llargument_tys.push(Reg::i32().llvm_type(cx));
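
The reason `PassMode::Direct` for aggregates is problematic (and why the assertion now lives in a central sanity check in `rustc_ty_utils`, see below) can be made concrete with a small sketch. On the ABIs that still use Direct for aggregates (wasm, `extern "ptx-kernel"`), `immediate_llvm_type` builds the LLVM parameter type from `layout.fields`, so two structs with identical size, alignment, and `Abi::Aggregate` layout can still lower to different LLVM signatures. The structs below are illustrative, and the spelled-out LLVM types are assumptions about that field-based lowering, not verified output:

```rust
// Both types have size 12, align 4, and `Abi::Aggregate`, yet under
// `PassMode::Direct` their LLVM parameter types would differ, because the
// LLVM type is derived from `layout.fields`:
//
//   Fields -> roughly `{ i32, i32, i32 }`
//   Array  -> roughly `{ [3 x i32] }`
//
// (The LLVM types above are assumptions for illustration.) This is why Direct
// aggregates are only tolerated on wasm and `extern "ptx-kernel"` for now.
#[repr(C)]
pub struct Fields {
    a: u32,
    b: u32,
    c: u32,
}

#[repr(C)]
pub struct Array {
    a: [u32; 3],
}
```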
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
index b4c7b0f120f91..f99f6a3b72164 100644
--- a/compiler/rustc_target/src/abi/call/aarch64.rs
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -40,6 +40,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !ret.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !ret.layout.is_aggregate() {
         if kind == AbiKind::DarwinPCS {
             // On Darwin, when returning an i8/i16, it must be sign-extended to 32 bits,
@@ -67,6 +71,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !arg.layout.is_aggregate() {
         if kind == AbiKind::DarwinPCS {
             // On Darwin, when passing an i8/i16, it must be sign-extended to 32 bits,
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
index 1923ea58838ba..95f6691d42aeb 100644
--- a/compiler/rustc_target/src/abi/call/arm.rs
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -30,6 +30,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !ret.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
         return;
@@ -56,6 +60,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(32);
         return;
diff --git a/compiler/rustc_target/src/abi/call/csky.rs b/compiler/rustc_target/src/abi/call/csky.rs
index 706493b0a6a69..8b4328db52ebb 100644
--- a/compiler/rustc_target/src/abi/call/csky.rs
+++ b/compiler/rustc_target/src/abi/call/csky.rs
@@ -7,6 +7,10 @@
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
 
 fn classify_ret<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     // For return type, aggregate which <= 2*XLen will be returned in registers.
     // Otherwise, aggregate will be returned indirectly.
     if arg.layout.is_aggregate() {
@@ -24,6 +28,10 @@ fn classify_ret<Ty>(arg: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     // For argument type, the first 4*XLen parts of aggregate will be passed
     // in registers, and the rest will be passed in stack.
     // So we can coerce to integers directly and let backend handle it correctly.
diff --git a/compiler/rustc_target/src/abi/call/loongarch.rs b/compiler/rustc_target/src/abi/call/loongarch.rs
index e649d58bbcab6..647b6500c52dd 100644
--- a/compiler/rustc_target/src/abi/call/loongarch.rs
+++ b/compiler/rustc_target/src/abi/call/loongarch.rs
@@ -152,6 +152,10 @@ fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u6
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return false; // the return value of this function is undocumented, so `false` is a guess
+    }
     if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
         match conv {
             FloatConv::Float(f) => {
@@ -214,6 +218,10 @@ fn classify_arg<'a, Ty, C>(
 ) where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !is_vararg {
         match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
             Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
index 1d4649ed8678e..06697bdd83ee7 100644
--- a/compiler/rustc_target/src/abi/call/m68k.rs
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -9,6 +9,10 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if arg.layout.is_aggregate() {
         arg.make_indirect_byval(None);
     } else {
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
index edcd1bab8b4b0..57ccfe2152bdf 100644
--- a/compiler/rustc_target/src/abi/call/mips.rs
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -17,6 +17,10 @@ fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     let dl = cx.data_layout();
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 5efd171b9dd76..bbf21f169bb6b 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -422,7 +422,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 }))
             }
 
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
                 // Helper for computing `homogeneous_aggregate`, allowing a custom
                 // starting offset (used below for handling variants).
                 let from_fields_at =
@@ -520,6 +520,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                     Ok(result)
                 }
             }
+            Abi::Aggregate { sized: false } => Err(Heterogeneous),
         }
     }
 }
@@ -555,8 +556,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
             Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
-            // The `Aggregate` ABI should always be adjusted later.
-            Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
+            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
     }
@@ -580,14 +580,30 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         PassMode::Indirect { attrs, meta_attrs, on_stack: false }
     }
 
+    /// Pass this argument directly instead of indirectly. Should NOT be used!
+    /// Only exists because of past ABI mistakes that will take time to fix
+    /// (see <https://github.com/rust-lang/rust/issues/115666>).
+    pub fn make_direct_deprecated(&mut self) {
+        match self.mode {
+            PassMode::Indirect { .. } => {
+                self.mode = PassMode::Direct(ArgAttributes::new());
+            }
+            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => return, // already direct
+            _ => panic!("Tried to make {:?} direct", self.mode),
+        }
+    }
+
     pub fn make_indirect(&mut self) {
         match self.mode {
-            PassMode::Direct(_) | PassMode::Pair(_, _) => {}
-            PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: false } => return,
+            PassMode::Direct(_) | PassMode::Pair(_, _) => {
+                self.mode = Self::indirect_pass_mode(&self.layout);
+            }
+            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
+                // already indirect
+                return;
+            }
             _ => panic!("Tried to make {:?} indirect", self.mode),
         }
-
-        self.mode = Self::indirect_pass_mode(&self.layout);
     }
 
     pub fn make_indirect_byval(&mut self, byval_align: Option<Align>) {
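
As a rough sketch of how the reworked `ArgAbi` defaults are meant to be consumed by target code: aggregates now start out indirect, and a target either leaves that alone, refines it with `make_indirect_byval`/`cast_to`, or opts back into the legacy behaviour via `make_direct_deprecated`. The hypothetical hook below mirrors the pattern of the csky/nvptx64/s390x hunks in this diff and is written as if it lived inside `rustc_target`; the size threshold and classification policy are invented for illustration, only the `ArgAbi` helpers are the real API:

```rust
use crate::abi::call::{ArgAbi, Reg};

// Hypothetical `classify_arg` for an imaginary target (illustration only).
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
    if !arg.layout.is_sized() {
        // Unsized arguments keep the indirect-with-metadata mode assigned by
        // `ArgAbi::new`; target code leaves them untouched.
        return;
    }
    if arg.layout.is_aggregate() {
        if arg.layout.size.bits() > 64 {
            // Large aggregates: keep (or re-assert) the indirect default.
            arg.make_indirect();
        } else {
            // Small aggregates: pass in a single integer register
            // (invented policy, shown to exercise `cast_to`).
            arg.cast_to(Reg::i64());
        }
    } else {
        // Scalars stay in `PassMode::Direct`; just widen small integers.
        arg.extend_integer_width_to(32);
    }
}
```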
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
index 4abe51cd697e0..5c040ce9c3b10 100644
--- a/compiler/rustc_target/src/abi/call/nvptx64.rs
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -4,12 +4,18 @@ use crate::abi::{HasDataLayout, TyAbiInterface};
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
     if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
         ret.make_indirect();
+    } else {
+        // FIXME: this is wrong! Need to decide which ABI we really want here.
+        ret.make_direct_deprecated();
     }
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
     if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
         arg.make_indirect();
+    } else {
+        // FIXME: this is wrong! Need to decide which ABI we really want here.
+        arg.make_direct_deprecated();
     }
 }
 
@@ -30,6 +36,9 @@ where
             _ => unreachable!("Align is given as power of 2 no larger than 16 bytes"),
         };
         arg.cast_to(Uniform { unit, total: Size::from_bytes(2 * align_bytes) });
+    } else {
+        // FIXME: find a better way to do this. See https://github.com/rust-lang/rust/issues/117271.
+        arg.make_direct_deprecated();
     }
 }
 
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
index 359bb8fc09a87..2d41f77e50e1e 100644
--- a/compiler/rustc_target/src/abi/call/powerpc64.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -46,6 +46,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !ret.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
         return;
@@ -89,6 +93,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(64);
         return;
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index 93a2045632a84..cbde234d34cc2 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -158,6 +158,10 @@ fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u6
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return false; // the return value of this function is undocumented, so `false` is a guess
+    }
     if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
         match conv {
             FloatConv::Float(f) => {
@@ -220,6 +224,10 @@ fn classify_arg<'a, Ty, C>(
 ) where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !is_vararg {
         match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
             Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
index ea23692817f50..1a2191082d5de 100644
--- a/compiler/rustc_target/src/abi/call/s390x.rs
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -17,6 +17,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
         arg.extend_integer_width_to(64);
         return;
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
index edcd1bab8b4b0..57ccfe2152bdf 100644
--- a/compiler/rustc_target/src/abi/call/sparc.rs
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -17,6 +17,10 @@ fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     let dl = cx.data_layout();
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
index 796b752ff9d41..a7a2b314a94f0 100644
--- a/compiler/rustc_target/src/abi/call/wasm.rs
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -34,6 +34,10 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    if !arg.layout.is_sized() {
+        // Not touching this...
+        return;
+    }
     arg.extend_integer_width_to(32);
     if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
         arg.make_indirect_byval(None);
@@ -67,21 +71,33 @@ where
 /// Also see <https://github.com/rust-lang/rust/issues/115666>.
 pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
     if !fn_abi.ret.is_ignore() {
-        classify_ret(&mut fn_abi.ret);
+        classify_ret_wasm_abi(&mut fn_abi.ret);
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(arg);
+        classify_arg_wasm_abi(arg);
     }
 
-    fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+    fn classify_ret_wasm_abi<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+        if !ret.layout.is_sized() {
+            // Not touching this...
+            return;
+        }
+        // FIXME: this is bad! https://github.com/rust-lang/rust/issues/115666
+        ret.make_direct_deprecated();
         ret.extend_integer_width_to(32);
     }
 
-    fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+    fn classify_arg_wasm_abi<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+        if !arg.layout.is_sized() {
+            // Not touching this...
+            return;
+        }
+        // FIXME: this is bad! https://github.com/rust-lang/rust/issues/115666
+        arg.make_direct_deprecated();
         arg.extend_integer_width_to(32);
     }
 }
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index c27f1e6dddab9..e9aedc3d28a1e 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -14,7 +14,7 @@ where
     C: HasDataLayout + HasTargetSpec,
 {
     if !fn_abi.ret.is_ignore() {
-        if fn_abi.ret.layout.is_aggregate() {
+        if fn_abi.ret.layout.is_aggregate() && fn_abi.ret.layout.is_sized() {
             // Returning a structure. Most often, this will use
             // a hidden first argument. On some platforms, though,
             // small structs are returned as integers.
@@ -50,7 +50,7 @@ where
     }
 
     for arg in fn_abi.args.iter_mut() {
-        if arg.is_ignore() {
+        if arg.is_ignore() || !arg.layout.is_sized() {
             continue;
         }
 
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index d1efe97769925..6c34585a11b82 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -153,9 +153,9 @@ fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg
     }
 }
 
-fn cast_target(cls: &[Option<Class>], size: Size) -> Option<CastTarget> {
+fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
     let mut i = 0;
-    let lo = reg_component(cls, &mut i, size)?;
+    let lo = reg_component(cls, &mut i, size).unwrap();
     let offset = Size::from_bytes(8) * (i as u64);
     let mut target = CastTarget::from(lo);
     if size > offset {
@@ -164,7 +164,7 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> Option<CastTarget> {
         }
     }
     assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
-    Some(target)
+    target
 }
 
 const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
@@ -179,6 +179,10 @@ where
     let mut sse_regs = MAX_SSE_REGS;
 
     let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
+        if !arg.layout.is_sized() {
+            // Not touching this...
+            return;
+        }
         let mut cls_or_mem = classify_arg(cx, arg);
 
         if is_arg {
@@ -227,9 +231,7 @@ where
                 // split into sized chunks passed individually
                 if arg.layout.is_aggregate() {
                     let size = arg.layout.size;
-                    if let Some(cast_target) = cast_target(cls, size) {
-                        arg.cast_to(cast_target);
-                    }
+                    arg.cast_to(cast_target(cls, size));
                 } else {
                     arg.extend_integer_width_to(32);
                 }
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
index 1aaf0e511ca41..90de1a42bc06b 100644
--- a/compiler/rustc_target/src/abi/call/x86_win64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -6,8 +6,8 @@ use crate::abi::Abi;
 pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
     let fixup = |a: &mut ArgAbi<'_, Ty>| {
         match a.layout.abi {
-            Abi::Uninhabited => {}
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => match a.layout.size.bits() {
+            Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
+            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
                 8 => a.cast_to(Reg::i8()),
                 16 => a.cast_to(Reg::i16()),
                 32 => a.cast_to(Reg::i32()),
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index fcf6626bbf05e..0db9c5a24a840 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -327,6 +327,76 @@ fn adjust_for_rust_scalar<'tcx>(
     }
 }
 
+/// Ensure that the ABI makes basic sense.
+fn fn_abi_sanity_check<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) {
+    fn fn_arg_sanity_check<'tcx>(
+        cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        arg: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) {
+        match &arg.mode {
+            PassMode::Ignore => {}
+            PassMode::Direct(_) => {
+                // Here the Rust type is used to determine the actual ABI, so we have to be very
+                // careful. Scalar/ScalarPair is fine, since backends will generally use
+                // `layout.abi` and ignore everything else. We should just reject `Aggregate`
+                // entirely here, but some targets need to be fixed first.
+                if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+                    // For an unsized type we'd only pass the sized prefix, so there is no universe
+                    // in which we ever want to allow this.
+                    assert!(
+                        arg.layout.is_sized(),
+                        "`PassMode::Direct` for unsized type in ABI: {:#?}",
+                        fn_abi
+                    );
+                    // This really shouldn't happen even for sized aggregates, since
+                    // `immediate_llvm_type` will use `layout.fields` to turn this Rust type into an
+                    // LLVM type. This means all sorts of Rust type details leak into the ABI.
+                    // However, wasm sadly *does* currently use this mode so we have to allow it --
+                    // but we absolutely shouldn't let any more targets do that.
+                    // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+                    //
+                    // The unstable abi `PtxKernel` also uses Direct for now.
+                    // It needs to switch to something else before stabilization can happen.
+                    // (See issue: https://github.com/rust-lang/rust/issues/117271)
+                    assert!(
+                        matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
+                            || fn_abi.conv == Conv::PtxKernel,
+                        "`PassMode::Direct` for aggregates only allowed on wasm and `extern \"ptx-kernel\"` fns\nProblematic type: {:#?}",
+                        arg.layout,
+                    );
+                }
+            }
+            PassMode::Pair(_, _) => {
+                // Similar to `Direct`, we need to make sure that backends use `layout.abi` and
+                // ignore the rest of the layout.
+                assert!(
+                    matches!(arg.layout.abi, Abi::ScalarPair(..)),
+                    "PassMode::Pair for type {}",
+                    arg.layout.ty
+                );
+            }
+            PassMode::Cast { .. } => {
+                // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+                assert!(arg.layout.is_sized());
+            }
+            PassMode::Indirect { meta_attrs: None, .. } => {
+                // No metadata, must be sized.
+                assert!(arg.layout.is_sized());
+            }
+            PassMode::Indirect { meta_attrs: Some(_), on_stack, .. } => {
+                // With metadata. Must be unsized and not on the stack.
+                assert!(arg.layout.is_unsized() && !on_stack);
+            }
+        }
+    }
+
+    for arg in fn_abi.args.iter() {
+        fn_arg_sanity_check(cx, fn_abi, arg);
+    }
+    fn_arg_sanity_check(cx, fn_abi, &fn_abi.ret);
+}
+
 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
 // arguments of this method, into a separate `struct`.
 #[tracing::instrument(level = "debug", skip(cx, caller_location, fn_def_id, force_thin_self_ptr))]
@@ -453,6 +523,7 @@ fn fn_abi_new_uncached<'tcx>(
     };
     fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id)?;
     debug!("fn_abi_new_uncached = {:?}", fn_abi);
+    fn_abi_sanity_check(cx, &fn_abi);
     Ok(cx.tcx.arena.alloc(fn_abi))
 }
 
@@ -520,13 +591,14 @@ fn fn_abi_adjust_for_abi<'tcx>(
 
                 _ => return,
             }
-            // `Aggregate` ABI must be adjusted to ensure that ABI-compatible Rust types are passed
-            // the same way.
+            // Compute `Aggregate` ABI.
+
+            let is_indirect_not_on_stack =
+                matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
+            assert!(is_indirect_not_on_stack, "{:?}", arg);
 
             let size = arg.layout.size;
-            if arg.layout.is_unsized() || size > Pointer(AddressSpace::DATA).size(cx) {
-                arg.make_indirect();
-            } else {
+            if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
                 // We want to pass small aggregates as immediates, but using
                 // an LLVM aggregate type for this leads to bad optimizations,
                 // so we pick an appropriately sized integer type instead.
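
As a worked illustration of where arguments end up under the adjusted Rust ABI (and of the invariants `fn_abi_sanity_check` enforces), the pass modes annotated below are expectations derived from the rules above, not verified compiler output; taking `str` by value requires `#![feature(unsized_fn_params)]`:

```rust
#![feature(unsized_fn_params)]

// Expected PassModes for a plain Rust-ABI function (expectations only).
fn example(
    a: u32,        // Abi::Scalar      -> PassMode::Direct
    b: (u32, u32), // Abi::ScalarPair  -> PassMode::Pair
    c: [u8; 4],    // small sized aggregate -> PassMode::Cast to a 4-byte integer
    d: [u8; 64],   // large aggregate  -> PassMode::Indirect { meta_attrs: None, .. }
    e: str,        // unsized          -> PassMode::Indirect { meta_attrs: Some(_), .. }
) {
    let _ = (a, b, c, d, &e);
}
```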
diff --git a/tests/ui/abi/compatibility.rs b/tests/ui/abi/compatibility.rs
index 0cdf229711ad0..8c605aa769112 100644
--- a/tests/ui/abi/compatibility.rs
+++ b/tests/ui/abi/compatibility.rs
@@ -1,5 +1,14 @@
 // check-pass
 // revisions: host
+// revisions: i686
+//[i686] compile-flags: --target i686-unknown-linux-gnu
+//[i686] needs-llvm-components: x86
+// revisions: x86-64
+//[x86-64] compile-flags: --target x86_64-unknown-linux-gnu
+//[x86-64] needs-llvm-components: x86
+// revisions: x86-64-win
+//[x86-64-win] compile-flags: --target x86_64-pc-windows-msvc
+//[x86-64-win] needs-llvm-components: x86
 // revisions: arm
 //[arm] compile-flags: --target arm-unknown-linux-gnueabi
 //[arm] needs-llvm-components: arm
@@ -37,9 +46,23 @@
 // revisions: wasi
 //[wasi] compile-flags: --target wasm32-wasi
 //[wasi] needs-llvm-components: webassembly
-// revisions: nvptx64
-//[nvptx64] compile-flags: --target nvptx64-nvidia-cuda
-//[nvptx64] needs-llvm-components: nvptx
+// revisions: bpf
+//[bpf] compile-flags: --target bpfeb-unknown-none
+//[bpf] needs-llvm-components: bpf
+// revisions: m68k
+//[m68k] compile-flags: --target m68k-unknown-linux-gnu
+//[m68k] needs-llvm-components: m68k
+// FIXME: disabled on nvptx64 since the target ABI fails the sanity check
+// see https://github.com/rust-lang/rust/issues/117480
+/* revisions: nvptx64
+  [nvptx64] compile-flags: --target nvptx64-nvidia-cuda
+  [nvptx64] needs-llvm-components: nvptx
+*/
+// FIXME: disabled since it fails on CI, which reports the csky component as missing
+/* revisions: csky
+  [csky] compile-flags: --target csky-unknown-linux-gnuabiv2
+  [csky] needs-llvm-components: csky
+*/
 #![feature(rustc_attrs, unsized_fn_params, transparent_unions)]
 #![cfg_attr(not(host), feature(no_core, lang_items), no_std, no_core)]
 #![allow(unused, improper_ctypes_definitions, internal_features)]
@@ -324,6 +347,7 @@ mod unsized_ {
     use super::*;
     test_transparent_unsized!(str_, str);
     test_transparent_unsized!(slice, [u8]);
+    test_transparent_unsized!(slice_with_prefix, (usize, [u8]));
     test_transparent_unsized!(dyn_trait, dyn Any);
 }
 
diff --git a/tests/ui/abi/issue-94223.rs b/tests/ui/abi/issue-94223.rs
deleted file mode 100644
index 79d6b94031bc2..0000000000000
--- a/tests/ui/abi/issue-94223.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-// check-pass
-#![allow(improper_ctypes_definitions)]
-#![crate_type = "lib"]
-
-// Check that computing the fn abi for `bad`, with a external ABI fn ptr that is not FFI-safe, does
-// not ICE.
-
-pub fn bad(f: extern "C" fn([u8])) {}
diff --git a/tests/ui/abi/unsized-args-in-c-abi-issues-94223-115845.rs b/tests/ui/abi/unsized-args-in-c-abi-issues-94223-115845.rs
new file mode 100644
index 0000000000000..a32cc6500f829
--- /dev/null
+++ b/tests/ui/abi/unsized-args-in-c-abi-issues-94223-115845.rs
@@ -0,0 +1,30 @@
+// check-pass
+#![allow(improper_ctypes_definitions)]
+#![feature(unsized_tuple_coercion)]
+#![feature(unsized_fn_params)]
+#![crate_type = "lib"]
+
+// Check that computing the fn abi for `bad`, with an external ABI fn ptr that is not FFI-safe, does
+// not ICE.
+
+pub fn bad(f: extern "C" fn([u8])) {}
+
+// While these get accepted, they should also not ICE.
+// (If we ever reject them, remove them from this test to ensure the `bad` above
+// is still tested. Do *not* make this a check/build-fail test.)
+
+pub extern "C" fn declare_bad(_x: str) {}
+
+#[no_mangle]
+pub extern "system" fn declare_more_bad(f: dyn FnOnce()) {
+}
+
+fn make_bad() -> extern "C" fn(([u8],)) {
+    todo!()
+}
+
+pub fn call_bad() {
+    let f = make_bad();
+    let slice: Box<([u8],)> = Box::new(([1; 8],));
+    f(*slice);
+}