diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 099687bd6b082..61d2295ba8c1c 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
-
-    fn clone_from(&mut self, other: &Self) {
-        BTreeClone::clone_from(self, other);
-    }
-}
-
-trait BTreeClone {
-    fn clone_from(&mut self, other: &Self);
-}
-
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-    default fn clone_from(&mut self, other: &Self) {
-        *self = other.clone();
-    }
-}
-
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-    fn clone_from(&mut self, other: &Self) {
-        // This truncates `self` to `other.len()` by calling `split_off` on
-        // the first key after `other.len()` elements if it exists.
-        let split_off_key = if self.len() > other.len() {
-            let diff = self.len() - other.len();
-            if diff <= other.len() {
-                self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-            } else {
-                self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-            }
-        } else {
-            None
-        };
-        if let Some(key) = split_off_key {
-            self.split_off(&key);
-        }
-
-        let mut siter = self.range_mut(..);
-        let mut oiter = other.iter();
-        // After truncation, `self` is at most as long as `other` so this loop
-        // replaces every key-value pair in `self`. Since `oiter` is in sorted
-        // order and the structure of the `BTreeMap` stays the same,
-        // the BTree invariants are maintained at the end of the loop.
-        while !siter.is_empty() {
-            if let Some((ok, ov)) = oiter.next() {
-                // SAFETY: This is safe because `siter` is nonempty.
-                let (sk, sv) = unsafe { siter.next_unchecked() };
-                sk.clone_from(ok);
-                sv.clone_from(ov);
-            } else {
-                break;
-            }
-        }
-        // If `other` is longer than `self`, the remaining elements are inserted.
-        self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-    }
 }
 
 impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
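
The removed `BTreeClone` machinery specialized `clone_from` on `K: Ord`, and `Ord` is not a specialization-marker trait, so `min_specialization` (enabled below in `lib.rs`) rejects the specializing impl. With it gone, `BTreeMap::clone_from` falls back to the default `Clone::clone_from`, i.e. `*self = other.clone()`. A minimal sketch of the observable behavior after this change (nothing here is part of the patch):

```rust
use std::collections::BTreeMap;

fn main() {
    let mut dst: BTreeMap<i32, String> = (0..100).map(|i| (i, i.to_string())).collect();
    let src: BTreeMap<i32, String> = (0..50).map(|i| (i, format!("v{}", i))).collect();

    // Now effectively `dst = src.clone()`; the removed impl instead truncated
    // `dst` in place and reused its existing nodes.
    dst.clone_from(&src);
    assert_eq!(dst.len(), 50);
    assert_eq!(dst[&3], "v3");
}
```
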
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index ecec1fb039b33..3ebb5662350a7 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -108,7 +108,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
 #![feature(str_internals)]
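
`min_specialization` accepts only specializing impls whose extra requirements are on traits tagged `#[rustc_specialization_trait]` or `#[rustc_unsafe_specialization_marker]`; several such taggings appear later in this diff. A rough sketch of the accepted shape, using hypothetical trait names, on a nightly that includes this change:

```rust
#![feature(min_specialization)]

trait Describe {
    fn describe(&self) -> &'static str;
}

impl<T> Describe for T {
    default fn describe(&self) -> &'static str {
        "some value"
    }
}

// Accepted: `Copy` is tagged `#[rustc_unsafe_specialization_marker]` by this
// patch, so it may appear as the extra bound of a specializing impl. A bound
// like `T: Ord` here would be rejected.
impl<T: Copy> Describe for T {
    fn describe(&self) -> &'static str {
        "a trivially copyable value"
    }
}

fn main() {
    assert_eq!(42u8.describe(), "a trivially copyable value");
    assert_eq!(String::new().describe(), "some value");
}
```
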
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 2f9505ec79ffa..307f6714f3230 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -249,7 +249,7 @@ use core::mem::{self, align_of, align_of_val, forget, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 
 use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::string::String;
@@ -1221,6 +1221,12 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
     }
 }
 
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
 /// We're doing this specialization here, and not as a more general optimization on `&T`, because it
 /// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
 /// store large values, that are slow to clone, but also heavy to check for equality, causing this
@@ -1229,7 +1235,7 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
     #[inline]
     fn eq(&self, other: &Rc<T>) -> bool {
         Rc::ptr_eq(self, other) || **self == **other
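
A specialization marker must not have any associated items, and `Eq` has one (the hidden `assert_receiver_is_total_eq`), hence the empty `MarkerEq` wrapper with a blanket impl. The pointer-identity shortcut it gates is only sound when equality is reflexive, which `Eq` promises and plain `PartialEq` does not. The same pattern, minus the rustc attribute and with hypothetical names (compiles on stable):

```rust
use std::rc::Rc;

// Same shape as `MarkerEq` above: an empty trait blanket-implemented
// for every `T: Eq`.
trait ReflexiveEq: PartialEq<Self> {}
impl<T: Eq> ReflexiveEq for T {}

fn fast_eq<T: ReflexiveEq>(a: &Rc<T>, b: &Rc<T>) -> bool {
    // Sound only because `Eq` promises reflexivity: two `Rc`s sharing an
    // allocation must hold equal values. A `PartialEq`-only type such as
    // `f64` (where `NaN != NaN`) gets no such shortcut.
    Rc::ptr_eq(a, b) || **a == **b
}

fn main() {
    let a = Rc::new(String::from("x"));
    let b = Rc::clone(&a);
    assert!(fast_eq(&a, &b)); // decided by pointer identity alone
}
```
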
@@ -1548,25 +1554,25 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        RcFromIter::from_iter(iter.into_iter())
+        ToRcSlice::to_rc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Rc<[T]>`.
-trait RcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_rc_slice(self) -> Rc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+    default fn to_rc_slice(self) -> Rc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+    fn to_rc_slice(self) -> Rc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -1577,29 +1583,15 @@ impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
 
             unsafe {
                 // SAFETY: We need an iterator of exact length here, and a
                 // `TrustedLen` iterator with an upper bound yields exactly `low` items.
-                Rc::from_iter_exact(iter, low)
+                Rc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
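
The collection trait is reshaped from `RcFromIter<T, I> for Rc<[T]>` to `ToRcSlice<T>` implemented on the iterator itself, and the dedicated `slice::Iter` fast path is dropped, presumably because its impl specialized on the lifetime `'a`, which `min_specialization` does not allow. Slice iterators still reach the exact-length path through `TrustedLen`. A usage sketch with standard types only:

```rust
use std::rc::Rc;

fn main() {
    // `Range<i32>` and its `Map` adapter implement `TrustedLen`, so
    // `to_rc_slice` takes the exact-length branch: one allocation of the
    // right size, no intermediate `Vec`.
    let evens: Rc<[i32]> = (0..10).map(|x| x * 2).collect();
    assert_eq!(evens[4], 8);

    // Collecting from a slice iterator now also goes through `TrustedLen`
    // rather than the removed `slice::Iter` impl.
    let copied: Rc<[i32]> = evens.iter().copied().collect();
    assert_eq!(&*copied, &*evens);
}
```
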
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index a81e0cf7e1d2a..2b71378619c05 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 
@@ -1779,7 +1779,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
     #[inline]
     fn eq(&self, other: &Arc<T>) -> bool {
         Arc::ptr_eq(self, other) || **self == **other
@@ -2105,25 +2105,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        ArcFromIter::from_iter(iter.into_iter())
+        ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -2134,29 +2134,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
 
             unsafe {
                 // SAFETY: We need an iterator of exact length here, and a
                 // `TrustedLen` iterator with an upper bound yields exactly `low` items.
-                Arc::from_iter_exact(iter, low)
+                Arc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     fn borrow(&self) -> &T {
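
The `Arc` changes mirror the `Rc` ones exactly: the same `ToArcSlice` reshaping, the same dropped slice fast path, and reuse of `MarkerEq` (made `pub(crate)` above so `sync.rs` can reach it). User-visible behavior is unchanged; for instance:

```rust
use std::sync::Arc;

fn main() {
    // Same `TrustedLen` exact-length path as the `Rc` version.
    let xs: Arc<[u32]> = (0..4).collect();
    assert_eq!(&*xs, &[0, 1, 2, 3]);

    // `String: Eq`, so `ArcEqIdent` (via the shared `MarkerEq` hack) may
    // short-circuit on pointer identity before comparing values.
    let a = Arc::new(String::from("x"));
    let b = Arc::clone(&a);
    assert_eq!(a, b);
}
```
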
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index b4a9da847876d..624ca2820d758 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1619,8 +1619,8 @@ impl<T: Default> Vec<T> {
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     #[rustc_deprecated(
         reason = "This is moving towards being removed in favor \
-        of `.resize_with(Default::default)`.  If you disagree, please comment \
-        in the tracking issue.",
+                  of `.resize_with(Default::default)`.  If you disagree, please comment \
+                  in the tracking issue.",
         since = "1.33.0"
     )]
     pub fn resize_default(&mut self, new_len: usize) {
@@ -1825,6 +1825,7 @@ impl<T: Clone + IsZero> SpecFromElem for T {
     }
 }
 
+#[rustc_specialization_trait]
 unsafe trait IsZero {
     /// Whether this value is zero
     fn is_zero(&self) -> bool;
@@ -1874,9 +1875,12 @@ unsafe impl<T> IsZero for *mut T {
     }
 }
 
-// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant
-// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok.
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
 
 unsafe impl<T: ?Sized> IsZero for Option<&T> {
     #[inline]
@@ -1885,13 +1889,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
     }
 }
 
-unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
-    #[inline]
-    fn is_zero(&self) -> bool {
-        self.is_none()
-    }
-}
-
 unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
     #[inline]
     fn is_zero(&self) -> bool {
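
`IsZero` gains `#[rustc_specialization_trait]` because the specialized impl above it (`impl<T: Clone + IsZero> SpecFromElem for T`) adds it as a bound, which `min_specialization` only permits for tagged traits. The `Option<&mut T>` impl can simply go: as the revised comment notes, `&mut T` is never `Clone`, so no `vec![elem; n]` call could reach it. The surviving fast path in action:

```rust
fn main() {
    // `None::<&u8>` is the all-zero bit pattern, so the `IsZero`
    // specialization can request zeroed memory from the allocator instead
    // of writing each element.
    let v: Vec<Option<&u8>> = vec![None; 1024];
    assert!(v.iter().all(|x| x.is_none()));

    // `vec![None::<&mut u8>; 1024]` would not compile at all, since
    // `Option<&mut u8>` is not `Clone` -- removing its `IsZero` impl
    // loses nothing.
}
```
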
diff --git a/src/libcore/iter/traits/marker.rs b/src/libcore/iter/traits/marker.rs
index 404cc84495c96..a9ba3908c3898 100644
--- a/src/libcore/iter/traits/marker.rs
+++ b/src/libcore/iter/traits/marker.rs
@@ -13,6 +13,7 @@
 /// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse
 /// [`Fuse`]: ../../std/iter/struct.Fuse.html
 #[stable(feature = "fused", since = "1.26.0")]
+#[rustc_unsafe_specialization_marker]
 pub trait FusedIterator: Iterator {}
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -38,6 +39,7 @@ impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {}
 /// [`usize::MAX`]: ../../std/usize/constant.MAX.html
 /// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint
 #[unstable(feature = "trusted_len", issue = "37572")]
+#[rustc_unsafe_specialization_marker]
 pub unsafe trait TrustedLen: Iterator {}
 
 #[unstable(feature = "trusted_len", issue = "37572")]
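
Tagging `FusedIterator` and `TrustedLen` with `#[rustc_unsafe_specialization_marker]` is what lets the `ToRcSlice`/`ToArcSlice` impls above keep specializing on `TrustedLen`. The "unsafe" in the name reflects the same caveat as the `Copy` FIXME below: specialization cannot see lifetimes, so impls of a marked trait must not depend on them. A sketch of a user-defined iterator that would qualify for the exact-length path (hypothetical type, nightly only):

```rust
#![feature(trusted_len)]

use std::iter::TrustedLen;
use std::rc::Rc;

/// Yields `item` exactly `left` times.
struct Repeated<T: Clone> {
    item: T,
    left: usize,
}

impl<T: Clone> Iterator for Repeated<T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        if self.left == 0 {
            return None;
        }
        self.left -= 1;
        Some(self.item.clone())
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.left, Some(self.left))
    }
}

// SAFETY: `size_hint` is exact; precisely `left` items are yielded.
unsafe impl<T: Clone> TrustedLen for Repeated<T> {}

fn main() {
    // Takes the exact-length branch of `to_rc_slice`.
    let fives: Rc<[u8]> = Repeated { item: 5, left: 3 }.collect();
    assert_eq!(&*fives, &[5, 5, 5]);
}
```
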
diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs
index 09a8b417e6e22..339b07119c6d5 100644
--- a/src/libcore/marker.rs
+++ b/src/libcore/marker.rs
@@ -363,6 +363,13 @@ pub trait StructuralEq {
 /// [impls]: #implementors
 #[stable(feature = "rust1", since = "1.0.0")]
 #[lang = "copy"]
+// FIXME(matthewjasper) This allows copying a type that doesn't implement
+// `Copy` because of unsatisfied lifetime bounds (copying `A<'_>` when only
+// `A<'static>: Copy` and `A<'_>: Clone`).
+// We have this attribute here for now only because quite a few
+// specializations on `Copy` already exist in the standard library,
+// and there's no way to have this behavior safely right now.
+#[rustc_unsafe_specialization_marker]
 pub trait Copy: Clone {
     // Empty.
 }
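
A sketch of the hazard the FIXME describes, with a hypothetical type: `Copy` can hold for a single lifetime instantiation while `Clone` holds for all of them, and trait selection for specialization happens after lifetimes are erased, so a `Copy`-specialized code path could be chosen for values that are not actually `Copy`.

```rust
#[derive(Clone)]
struct A<'a>(&'a str);

// `Copy` only for the `'static` instantiation; `A<'a>: Clone` for any `'a`.
impl Copy for A<'static> {}

fn main() {
    let s = String::from("short-lived");
    let a = A(&s); // `A<'_>` where `'_` is not `'static`

    // A specialization on `T: Copy` (say, a memcpy-based clone fast path)
    // cannot tell `A<'_>` from `A<'static>` once lifetimes are erased --
    // exactly the unsoundness the FIXME is flagging.
    let _b = a.clone();
}
```
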