diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index bdbc59821de2f..bede10df39eb9 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -172,7 +172,7 @@ impl<T> TypedArena<T> {
     fn can_allocate(&self, additional: usize) -> bool {
         // FIXME: this should *likely* use `offset_from`, but more
         // investigation is needed (including running tests in miri).
-        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
+        let available_bytes = self.end.get().bare_addr() - self.ptr.get().bare_addr();
         let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
         available_bytes >= additional_bytes
     }
@@ -245,7 +245,7 @@ impl<T> TypedArena<T> {
         if mem::needs_drop::<T>() {
             // FIXME: this should *likely* use `offset_from`, but more
             // investigation is needed (including running tests in miri).
-            let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
+            let used_bytes = self.ptr.get().bare_addr() - last_chunk.start().bare_addr();
             last_chunk.entries = used_bytes / mem::size_of::<T>();
         }
@@ -271,9 +271,9 @@ impl<T> TypedArena<T> {
     // chunks.
     fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
         // Determine how much was filled.
-        let start = last_chunk.start().addr();
+        let start = last_chunk.start().bare_addr();
         // We obtain the value of the pointer to the first uninitialized element.
-        let end = self.ptr.get().addr();
+        let end = self.ptr.get().bare_addr();
         // We then calculate the number of elements to be dropped in the last chunk,
         // which is the filled area's length.
         let diff = if mem::size_of::<T>() == 0 {
@@ -396,11 +396,11 @@ impl DroplessArena {
         self.start.set(chunk.start());

         // Align the end to DROPLESS_ALIGNMENT.
-        let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
+        let end = align_down(chunk.end().bare_addr(), DROPLESS_ALIGNMENT);

         // Make sure we don't go past `start`. This should not happen since the allocation
         // should be at least DROPLESS_ALIGNMENT - 1 bytes.
-        debug_assert!(chunk.start().addr() <= end);
+        debug_assert!(chunk.start().bare_addr() <= end);

         self.end.set(chunk.end().with_addr(end));
@@ -415,9 +415,9 @@ impl DroplessArena {
         // This loop executes once or twice: if allocation fails the first
         // time, the `grow` ensures it will succeed the second time.
         loop {
-            let start = self.start.get().addr();
+            let start = self.start.get().bare_addr();
             let old_end = self.end.get();
-            let end = old_end.addr();
+            let end = old_end.bare_addr();

             // Align allocated bytes so that `self.end` stays aligned to
             // DROPLESS_ALIGNMENT.
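To illustrate the renamed API in the arena code above: a minimal, self-contained sketch of the same capacity check. It is not part of the patch; the `can_allocate` helper is hypothetical, and it assumes a toolchain built with this change (on an unpatched nightly the method is still `addr`, under the `strict_provenance` feature).

```rust
#![feature(strict_provenance)]

use std::mem;

// Hypothetical helper mirroring `TypedArena::can_allocate`: compare the
// remaining capacity of a bump region against a requested element count.
// `bare_addr` yields only the numeric address; neither value here could be
// turned back into a pointer without `with_addr`.
fn can_allocate<T>(start: *const u8, end: *const u8, additional: usize) -> bool {
    let available_bytes = end.bare_addr() - start.bare_addr();
    let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
    available_bytes >= additional_bytes
}

fn main() {
    let buf = [0u8; 64];
    let range = buf.as_ptr_range();
    assert!(can_allocate::<u32>(range.start, range.end, 16));
    assert!(!can_allocate::<u32>(range.start, range.end, 17));
}
```

The point of the rename is visible in the signature: the returned `usize` is a bare address, so nothing in this function can reconstruct a pointer from it.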
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
index 295e27691090c..fde99b808bca4 100644
--- a/compiler/rustc_codegen_ssa/src/mono_item.rs
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -138,7 +138,7 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
     fn to_raw_string(&self) -> String {
         match *self {
             MonoItem::Fn(instance) => {
-                format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().addr())
+                format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().bare_addr())
             }
             MonoItem::Static(id) => format!("Static({id:?})"),
             MonoItem::GlobalAsm(id) => format!("GlobalAsm({id:?})"),
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
index ff4208def319d..cd9ea972f2f9b 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
@@ -104,7 +104,7 @@ where
     #[inline]
     pub fn tag(&self) -> T {
         // Unpack the tag, according to the `self.packed` encoding scheme
-        let tag = self.packed.addr().get() >> Self::TAG_BIT_SHIFT;
+        let tag = self.packed.bare_addr().get() >> Self::TAG_BIT_SHIFT;

         // Safety:
         // The shift retrieves the original value from `T::into_usize`,
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl
index 220da19a29dc8..57005dcdf4cd2 100644
--- a/compiler/rustc_hir_typeck/messages.ftl
+++ b/compiler/rustc_hir_typeck/messages.ftl
@@ -90,8 +90,8 @@ hir_typeck_lossy_provenance_int2ptr =
 hir_typeck_lossy_provenance_ptr2int =
     under strict provenance it is considered bad style to cast pointer `{$expr_ty}` to integer `{$cast_ty}`
-    .suggestion = use `.addr()` to obtain the address of a pointer
-    .help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+    .suggestion = use `.bare_addr()` to obtain the address of a pointer without its provenance; note that this address cannot be cast back to a pointer later, use `with_addr` to rebuild the pointer instead
+    .help = if you need to cast the address back to a pointer later, use `.expose_addr()` instead

 hir_typeck_method_call_on_unknown_raw_pointee =
     cannot call a method on a raw pointer with an unknown pointee type
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index 1d324f128f230..7ef5593831aae 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -256,7 +256,7 @@ pub enum LossyProvenancePtr2IntSuggestion<'tcx> {
     NeedsParensCast {
         #[suggestion_part(code = "(")]
         expr_span: Span,
-        #[suggestion_part(code = ").addr() as {cast_ty}")]
+        #[suggestion_part(code = ").bare_addr() as {cast_ty}")]
         cast_span: Span,
         cast_ty: Ty<'tcx>,
     },
@@ -264,12 +264,12 @@ pub enum LossyProvenancePtr2IntSuggestion<'tcx> {
     NeedsParens {
         #[suggestion_part(code = "(")]
         expr_span: Span,
-        #[suggestion_part(code = ").addr()")]
+        #[suggestion_part(code = ").bare_addr()")]
         cast_span: Span,
     },
     #[suggestion(
         hir_typeck_suggestion,
-        code = ".addr() as {cast_ty}",
+        code = ".bare_addr() as {cast_ty}",
         applicability = "maybe-incorrect"
     )]
     NeedsCast {
         cast_span: Span,
         cast_ty: Ty<'tcx>,
     },
-    #[suggestion(hir_typeck_suggestion, code = ".addr()", applicability = "maybe-incorrect")]
+    #[suggestion(hir_typeck_suggestion, code = ".bare_addr()", applicability = "maybe-incorrect")]
     Other {
         #[primary_span]
         cast_span: Span,
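For context on the diagnostic wording changed above, a sketch of code the `lossy_provenance_casts` lint fires on and its two suggested rewrites. This is not part of the patch; it assumes a toolchain with this rename applied, plus the `exposed_provenance` feature gate for the round-trip variant.

```rust
#![feature(strict_provenance, exposed_provenance)]

fn main() {
    let x = 42u8;
    let ptr = &x as *const u8;

    // `ptr as usize` is what the lint warns about; the suggestion now
    // points at `bare_addr`, which makes the provenance loss explicit.
    let addr = ptr.bare_addr();

    // The help text covers the case where the integer must become a
    // pointer again: expose the provenance up front instead.
    let exposed = ptr.expose_addr();
    let roundtrip = std::ptr::from_exposed_addr::<u8>(exposed);

    assert_eq!(addr, exposed);
    unsafe { assert_eq!(*roundtrip, 42) };
}
```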
diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index c87ef870a084e..d36a0e04cf168 100644
--- a/compiler/rustc_middle/src/ty/generic_args.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -149,7 +149,7 @@ impl<'tcx> GenericArg<'tcx> {
         // pointers were originally created from `Interned` types in `pack()`,
         // and this is just going in the other direction.
         unsafe {
-            match self.ptr.addr().get() & TAG_MASK {
+            match self.ptr.bare_addr().get() & TAG_MASK {
                 REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked(
                     ptr.cast::<ty::RegionKind<'tcx>>().as_ref(),
                 ))),
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index f005a24050442..172103b18f4c9 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -624,7 +624,7 @@ impl<'tcx> Term<'tcx> {
         // pointers were originally created from `Interned` types in `pack()`,
         // and this is just going in the other direction.
         unsafe {
-            match self.ptr.addr().get() & TAG_MASK {
+            match self.ptr.bare_addr().get() & TAG_MASK {
                 TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked(
                     ptr.cast::<WithCachedTypeInfo<ty::TyKind<'tcx>>>().as_ref(),
                 ))),
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 084157b97ab41..908c7a9b3bc2a 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -2840,7 +2840,7 @@ impl<T: ?Sized> Weak<T> {
 }

 pub(crate) fn is_dangling<T: ?Sized>(ptr: *const T) -> bool {
-    (ptr.cast::<()>()).addr() == usize::MAX
+    (ptr.cast::<()>()).bare_addr() == usize::MAX
 }

 /// Helper type to allow accessing the reference counts without
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index dfd42ca06193a..ec9d06794ceb5 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -222,7 +222,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         let exact = if T::IS_ZST {
-            self.end.addr().wrapping_sub(self.ptr.as_ptr().addr())
+            self.end.bare_addr().wrapping_sub(self.ptr.as_ptr().bare_addr())
         } else {
             unsafe { non_null!(self.end, T).sub_ptr(self.ptr) }
         };
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index bfdd28a7399fd..0887d334eca8b 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -956,7 +956,7 @@ mod impls {
         #[inline]
         fn hash<H: Hasher>(&self, state: &mut H) {
             let (address, metadata) = self.to_raw_parts();
-            state.write_usize(address.addr());
+            state.write_usize(address.bare_addr());
             metadata.hash(state);
         }
     }
@@ -966,7 +966,7 @@ mod impls {
         #[inline]
         fn hash<H: Hasher>(&self, state: &mut H) {
             let (address, metadata) = self.to_raw_parts();
-            state.write_usize(address.addr());
+            state.write_usize(address.bare_addr());
             metadata.hash(state);
         }
     }
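As a sanity check of the `Hash` impls above (they hash the address plus metadata, ignoring provenance), a small sketch, again assuming the patched `bare_addr` name. For a thin pointer the metadata is `()`, whose `Hash` impl writes nothing, so the two hashers below see identical write sequences.

```rust
#![feature(strict_provenance)]

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn main() {
    let x = 7u32;
    let p = &x as *const u32;

    // Hash the pointer directly via the impl shown in the diff.
    let mut h1 = DefaultHasher::new();
    p.hash(&mut h1);

    // Reproduce it by hand: bare address, then `()` metadata (a no-op).
    let mut h2 = DefaultHasher::new();
    h2.write_usize(p.bare_addr());
    ().hash(&mut h2);

    assert_eq!(h1.finish(), h2.finish());
}
```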
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 96e667d63c5f3..afaa6fb15a00c 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1267,7 +1267,7 @@ extern "rust-intrinsic" {
     /// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a
     ///   type for that buffer, it can use [`MaybeUninit`][crate::mem::MaybeUninit].
     /// - If the code actually wants to work on the address the pointer points to, it can use `as`
-    ///   casts or [`ptr.addr()`][pointer::addr].
+    ///   casts or [`ptr.bare_addr()`][pointer::bare_addr].
     ///
     /// Turning a `*mut T` into an `&mut T`:
     ///
@@ -2781,8 +2781,8 @@ pub(crate) fn is_valid_allocation_size(size: usize, len: usize) -> bool {
 /// `count * size` do *not* overlap.
 #[inline]
 pub(crate) fn is_nonoverlapping(src: *const (), dst: *const (), size: usize, count: usize) -> bool {
-    let src_usize = src.addr();
-    let dst_usize = dst.addr();
+    let src_usize = src.bare_addr();
+    let dst_usize = dst.bare_addr();
     let Some(size) = size.checked_mul(count) else {
         crate::panicking::panic_nounwind(
             "is_nonoverlapping: `size_of::<T>() * count` overflows a usize",
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 85a56d37ab75c..3135278460174 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -35,7 +35,7 @@ impl<T: ?Sized> *const T {
     pub const fn is_null(self) -> bool {
         #[inline]
         fn runtime_impl(ptr: *const u8) -> bool {
-            ptr.addr() == 0
+            ptr.bare_addr() == 0
         }

         #[inline]
@@ -203,7 +203,7 @@ impl<T: ?Sized> *const T {
     #[must_use]
     #[inline(always)]
     #[unstable(feature = "strict_provenance", issue = "95228")]
-    pub fn addr(self) -> usize {
+    pub fn bare_addr(self) -> usize {
         // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
         // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
         // provenance).
@@ -223,7 +223,7 @@ impl<T: ?Sized> *const T {
     /// Provenance][super#strict-provenance] rules. Supporting
     /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
     /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
-    /// use [`addr`][pointer::addr] wherever possible.
+    /// use [`bare_addr`][pointer::bare_addr] wherever possible.
     ///
     /// On most platforms this will produce a value with the same bytes as the original pointer,
     /// because all the bytes are dedicated to describing the address. Platforms which need to store
@@ -264,7 +264,7 @@ impl<T: ?Sized> *const T {
         // In the mean-time, this operation is defined to be "as if" it was
         // a wrapping_offset, so we can emulate it as such. This should properly
         // restore pointer provenance even under today's compiler.
-        let self_addr = self.addr() as isize;
+        let self_addr = self.bare_addr() as isize;
         let dest_addr = addr as isize;
         let offset = dest_addr.wrapping_sub(self_addr);

@@ -282,7 +282,7 @@ impl<T: ?Sized> *const T {
     #[inline]
     #[unstable(feature = "strict_provenance", issue = "95228")]
     pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
-        self.with_addr(f(self.addr()))
+        self.with_addr(f(self.bare_addr()))
     }

     /// Decompose a (possibly wide) pointer into its data pointer and metadata components.
@@ -592,7 +592,7 @@ impl<T: ?Sized> *const T {
     /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
     ///
     /// // Get the "tag" back
-    /// let tag = tagged_ptr.addr() & tag_mask;
+    /// let tag = tagged_ptr.bare_addr() & tag_mask;
     /// assert_eq!(tag, 0b10);
     ///
     /// // Note that `tagged_ptr` is unaligned, it's UB to read from it.
@@ -664,7 +664,7 @@ impl<T: ?Sized> *const T {
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
     /// origin as isize) / mem::size_of::<T>()`.
-    // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+    // FIXME: recommend `bare_addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
     /// [allocated object]: crate::ptr#allocated-object
@@ -1611,7 +1611,7 @@ impl<T: ?Sized> *const T {
         #[inline]
         fn runtime_impl(ptr: *const (), align: usize) -> bool {
-            ptr.addr() & (align - 1) == 0
+            ptr.bare_addr() & (align - 1) == 0
         }

         #[inline]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 018efd4b9b34b..23bc0077a3ec5 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -202,7 +202,7 @@
 //! we provide the [`map_addr`][] method.
 //!
 //! To help make it clear that code is "following" Strict Provenance semantics, we also provide an
-//! [`addr`][] method which promises that the returned address is not part of a
+//! [`bare_addr`][] method which promises that the returned address is not part of a
 //! pointer-usize-pointer roundtrip. In the future we may provide a lint for pointer<->integer
 //! casts to help you audit if your code conforms to strict provenance.
 //!
@@ -239,7 +239,7 @@
 //! let tagged = ptr.map_addr(|addr| addr | HAS_DATA);
 //!
 //! // Check the flag:
-//! if tagged.addr() & HAS_DATA != 0 {
+//! if tagged.bare_addr() & HAS_DATA != 0 {
 //!     // Untag and read the pointer
 //!     let data = *tagged.map_addr(|addr| addr & FLAG_MASK);
 //!     assert_eq!(data, 17);
@@ -294,7 +294,7 @@
 //! particular platform, and it's an open question as to how to specify this (if at all).
 //! Notably, [CHERI][] relies on a compression scheme that can't handle a
 //! pointer getting offset "too far" out of bounds. If this happens, the address
-//! returned by `addr` will be the value you expect, but the provenance will get invalidated
+//! returned by `bare_addr` will be the value you expect, but the provenance will get invalidated
 //! and using it to read/write will fault. The details of this are architecture-specific
 //! and based on alignment, but the buffer on either side of the pointer's range is pretty
 //! generous (think kilobytes, not bytes).
@@ -342,7 +342,7 @@
 //!
 //! Exposed Provenance is provided by the [`expose_addr`] and [`from_exposed_addr`] methods, which
 //! are meant to replace `as` casts between pointers and integers. [`expose_addr`] is a lot like
-//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
+//! [`bare_addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
 //! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but
 //! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`]
 //! can be used to construct a pointer with one of these previously 'exposed' provenances.
@@ -372,7 +372,7 @@
 //! [`wrapping_offset`]: pointer::wrapping_offset
 //! [`with_addr`]: pointer::with_addr
 //! [`map_addr`]: pointer::map_addr
-//! [`addr`]: pointer::addr
+//! [`bare_addr`]: pointer::bare_addr
 //! [`ptr::dangling`]: core::ptr::dangling
 //! [`expose_addr`]: pointer::expose_addr
 //! [`from_exposed_addr`]: from_exposed_addr
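The module docs above draw the key line between `bare_addr` (no round trip, ever) and `with_addr` (provenance-preserving address change). A minimal sketch of that split, not part of the patch and assuming the patched method name:

```rust
#![feature(strict_provenance)]

fn main() {
    let x = [5u32, 6];
    let p: *const u32 = x.as_ptr();

    // `bare_addr` yields just the address; by itself there is no way
    // back to a usable pointer from this integer.
    let second_addr = p.bare_addr() + std::mem::size_of::<u32>();

    // `with_addr` re-attaches `p`'s provenance to the new address, so the
    // resulting pointer is valid for reads within `x`.
    let q = p.with_addr(second_addr);
    unsafe { assert_eq!(*q, 6) };
}
```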
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 28ba26f5c16c4..1eee2c489fb1a 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -35,7 +35,7 @@ impl<T: ?Sized> *mut T {
     pub const fn is_null(self) -> bool {
         #[inline]
         fn runtime_impl(ptr: *mut u8) -> bool {
-            ptr.addr() == 0
+            ptr.bare_addr() == 0
         }

         #[inline]
@@ -211,7 +211,7 @@ impl<T: ?Sized> *mut T {
     #[must_use]
     #[inline(always)]
     #[unstable(feature = "strict_provenance", issue = "95228")]
-    pub fn addr(self) -> usize {
+    pub fn bare_addr(self) -> usize {
         // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
         // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
         // provenance).
@@ -231,7 +231,7 @@ impl<T: ?Sized> *mut T {
     /// Provenance][super#strict-provenance] rules. Supporting
     /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
     /// by tools that help you to stay conformant with the Rust memory model, so it is recommended
-    /// to use [`addr`][pointer::addr] wherever possible.
+    /// to use [`bare_addr`][pointer::bare_addr] wherever possible.
     ///
     /// On most platforms this will produce a value with the same bytes as the original pointer,
     /// because all the bytes are dedicated to describing the address. Platforms which need to store
@@ -272,7 +272,7 @@ impl<T: ?Sized> *mut T {
         // In the mean-time, this operation is defined to be "as if" it was
         // a wrapping_offset, so we can emulate it as such. This should properly
         // restore pointer provenance even under today's compiler.
-        let self_addr = self.addr() as isize;
+        let self_addr = self.bare_addr() as isize;
         let dest_addr = addr as isize;
         let offset = dest_addr.wrapping_sub(self_addr);

@@ -290,7 +290,7 @@ impl<T: ?Sized> *mut T {
     #[inline]
     #[unstable(feature = "strict_provenance", issue = "95228")]
     pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
-        self.with_addr(f(self.addr()))
+        self.with_addr(f(self.bare_addr()))
     }

     /// Decompose a (possibly wide) pointer into its data pointer and metadata components.
@@ -607,7 +607,7 @@ impl<T: ?Sized> *mut T {
     /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
     ///
     /// // Get the "tag" back
-    /// let tag = tagged_ptr.addr() & tag_mask;
+    /// let tag = tagged_ptr.bare_addr() & tag_mask;
     /// assert_eq!(tag, 0b10);
     ///
     /// // Note that `tagged_ptr` is unaligned, it's UB to read from/write to it.
@@ -839,7 +839,7 @@ impl<T: ?Sized> *mut T {
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
     /// origin as isize) / mem::size_of::<T>()`.
-    // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+    // FIXME: recommend `bare_addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
     /// [allocated object]: crate::ptr#allocated-object
@@ -1884,7 +1884,7 @@ impl<T: ?Sized> *mut T {
         #[inline]
         fn runtime_impl(ptr: *mut (), align: usize) -> bool {
-            ptr.addr() & (align - 1) == 0
+            ptr.bare_addr() & (align - 1) == 0
         }

         #[inline]
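A small sketch of the `*mut` tagging pattern the docs above describe, assuming the patched method name. The tag lives in the alignment bits, and `map_addr` keeps the provenance intact across tagging and untagging:

```rust
#![feature(strict_provenance)]

fn main() {
    let mut v = 17u32;
    let p: *mut u32 = &mut v;

    // `u32` is 4-aligned, so the low two address bits are free for a tag.
    let tag_mask = 0b11;
    assert_eq!(p.bare_addr() & tag_mask, 0);

    let tagged = p.map_addr(|a| a | 0b10);
    assert_eq!(tagged.bare_addr() & tag_mask, 0b10);

    // Clear the tag before dereferencing; the untagged pointer is `p` again.
    unsafe { *tagged.map_addr(|a| a & !tag_mask) += 1 };
    assert_eq!(v, 18);
}
```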
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index acb8c552a6338..52375327166d7 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -284,17 +284,17 @@ impl<T: ?Sized> NonNull<T> {
     /// Gets the "address" portion of the pointer.
     ///
-    /// For more details see the equivalent method on a raw pointer, [`pointer::addr`].
+    /// For more details see the equivalent method on a raw pointer, [`pointer::bare_addr`].
     ///
     /// This API and its claimed semantics are part of the Strict Provenance experiment,
     /// see the [`ptr` module documentation][crate::ptr].
     #[must_use]
     #[inline]
     #[unstable(feature = "strict_provenance", issue = "95228")]
-    pub fn addr(self) -> NonZero<usize> {
+    pub fn bare_addr(self) -> NonZero<usize> {
         // SAFETY: The pointer is guaranteed by the type to be non-null,
         // meaning that the address will be non-zero.
-        unsafe { NonZero::new_unchecked(self.pointer.addr()) }
+        unsafe { NonZero::new_unchecked(self.pointer.bare_addr()) }
     }

     /// Creates a new pointer with the given address.
@@ -321,7 +321,7 @@ impl<T: ?Sized> NonNull<T> {
     #[inline]
     #[unstable(feature = "strict_provenance", issue = "95228")]
     pub fn map_addr(self, f: impl FnOnce(NonZero<usize>) -> NonZero<usize>) -> Self {
-        self.with_addr(f(self.addr()))
+        self.with_addr(f(self.bare_addr()))
     }

     /// Acquires the underlying `*mut` pointer.
@@ -803,7 +803,7 @@ impl<T: ?Sized> NonNull<T> {
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
     /// origin as isize) / mem::size_of::<T>()`.
-    // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+    // FIXME: recommend `bare_addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
     /// [allocated object]: crate::ptr#allocated-object
@@ -839,10 +839,10 @@ impl<T: ?Sized> NonNull<T> {
     ///
     /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap();
     /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap();
-    /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize);
+    /// let diff = (ptr2.bare_addr().get() as isize).wrapping_sub(ptr1.bare_addr().get() as isize);
     /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
     /// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap();
-    /// assert_eq!(ptr2.addr(), ptr2_other.addr());
+    /// assert_eq!(ptr2.bare_addr(), ptr2_other.bare_addr());
     /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
     /// // computing their offset is undefined behavior, even though
     /// // they point to the same address!
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 7910981d0f5ee..6aa89ad24a7fa 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -26,7 +26,7 @@ macro_rules! if_zst {
         #![allow(unused_unsafe)] // we're sometimes used within an unsafe block

         if T::IS_ZST {
-            let $len = $this.end_or_len.addr();
+            let $len = $this.end_or_len.bare_addr();
             $zst_body
         } else {
             // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 993a608f42b60..d5cf16b678bf5 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -342,7 +342,7 @@ where
     assert!(mem::size_of::<T>() > 0);
     // FIXME: this should *likely* use `offset_from`, but more
     // investigation is needed (including running tests in miri).
-    (r.addr() - l.addr()) / mem::size_of::<T>()
+    (r.bare_addr() - l.bare_addr()) / mem::size_of::<T>()
 }

 loop {
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 45193c11e1d6b..b5bb2165e0bb1 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1752,9 +1752,9 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).bare_addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
+    /// assert_eq!(atom.load(Ordering::Relaxed).bare_addr(), 8);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
@@ -1832,9 +1832,9 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).bare_addr(), 0);
     /// // Note: in units of bytes, not `size_of::<i64>()`.
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.load(Ordering::Relaxed).bare_addr(), 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
@@ -1868,8 +1868,8 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::without_provenance_mut(1));
-    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).bare_addr(), 1);
+    /// assert_eq!(atom.load(Ordering::Relaxed).bare_addr(), 0);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
@@ -1916,10 +1916,10 @@ impl<T> AtomicPtr<T> {
     ///
     /// let atom = AtomicPtr::<i64>::new(pointer);
     /// // Tag the bottom bit of the pointer.
-    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).bare_addr() & 1, 0);
     /// // Extract and untag.
     /// let tagged = atom.load(Ordering::Relaxed);
-    /// assert_eq!(tagged.addr() & 1, 1);
+    /// assert_eq!(tagged.bare_addr() & 1, 1);
     /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
     /// ```
     #[inline]
@@ -1966,7 +1966,7 @@ impl<T> AtomicPtr<T> {
     /// let pointer = &mut 3i64 as *mut i64;
     /// // A tagged pointer
     /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
-    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).bare_addr() & 1, 1);
     /// // Untag, and extract the previously tagged pointer.
     /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
     ///     .map_addr(|a| a & !1);
@@ -2018,7 +2018,7 @@ impl<T> AtomicPtr<T> {
     ///
     /// // Toggle a tag bit on the pointer.
     /// atom.fetch_xor(1, Ordering::Relaxed);
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
+    /// assert_eq!(atom.load(Ordering::Relaxed).bare_addr() & 1, 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 0d1c72a689291..0784c79681efd 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -131,17 +131,17 @@ fn int_max() {
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_add_null() {
     let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
-    assert_eq!(atom.load(SeqCst).addr(), 8);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst).bare_addr(), 0);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 8);

-    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
-    assert_eq!(atom.load(SeqCst).addr(), 9);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst).bare_addr(), 8);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 9);

-    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
-    assert_eq!(atom.load(SeqCst).addr(), 1);
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).bare_addr(), 9);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 1);

-    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
-    assert_eq!(atom.load(SeqCst).addr(), 0);
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst).bare_addr(), 1);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 0);
 }

 #[test]
@@ -174,14 +174,14 @@ fn ptr_add_data() {
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_bitops() {
     let atom = AtomicPtr::<u8>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
-    assert_eq!(atom.load(SeqCst).addr(), 0b0111);
+    assert_eq!(atom.fetch_or(0b0111, SeqCst).bare_addr(), 0);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 0b0111);

-    assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
-    assert_eq!(atom.load(SeqCst).addr(), 0b0101);
+    assert_eq!(atom.fetch_and(0b1101, SeqCst).bare_addr(), 0b0111);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 0b0101);

-    assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
-    assert_eq!(atom.load(SeqCst).addr(), 0b1010);
+    assert_eq!(atom.fetch_xor(0b1111, SeqCst).bare_addr(), 0b0101);
+    assert_eq!(atom.load(SeqCst).bare_addr(), 0b1010);
 }

 #[test]
@@ -197,7 +197,7 @@ fn ptr_bitops_tagging() {
     const MASK_TAG: usize = 0b1111;
     const MASK_PTR: usize = !MASK_TAG;

-    assert_eq!(ptr.addr() & MASK_TAG, 0);
+    assert_eq!(ptr.bare_addr() & MASK_TAG, 0);

     assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
     assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));
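Bridging the doc examples and the tests above, a runnable sketch of `AtomicPtr` tagging, not part of the patch and assuming the patched method name:

```rust
#![feature(strict_provenance)]

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut data = 3i64;
    let ptr: *mut i64 = &mut data;

    // `i64` is 8-aligned, so the low three address bits are free for flags.
    let atom = AtomicPtr::new(ptr);
    atom.fetch_or(1, Ordering::Relaxed);

    let tagged = atom.load(Ordering::Relaxed);
    assert_eq!(tagged.bare_addr() & 1, 1);

    // Strip the tag with `map_addr` so provenance survives the round trip.
    unsafe { assert_eq!(*tagged.map_addr(|a| a & !1), 3) };
}
```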
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 0f7fde747690a..26bd4f0b6ab7b 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -560,10 +560,22 @@ fn offset_of_addr() {

     let base = Foo { x: 0, y: 0, z: Bar(0, 0) };

-    assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, x), ptr::addr_of!(base.x).addr());
-    assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, y), ptr::addr_of!(base.y).addr());
-    assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.0), ptr::addr_of!(base.z.0).addr());
-    assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.1), ptr::addr_of!(base.z.1).addr());
+    assert_eq!(
+        ptr::addr_of!(base).bare_addr() + offset_of!(Foo, x),
+        ptr::addr_of!(base.x).bare_addr()
+    );
+    assert_eq!(
+        ptr::addr_of!(base).bare_addr() + offset_of!(Foo, y),
+        ptr::addr_of!(base.y).bare_addr()
+    );
+    assert_eq!(
+        ptr::addr_of!(base).bare_addr() + offset_of!(Foo, z.0),
+        ptr::addr_of!(base.z.0).bare_addr()
+    );
+    assert_eq!(
+        ptr::addr_of!(base).bare_addr() + offset_of!(Foo, z.1),
+        ptr::addr_of!(base.z.1).bare_addr()
+    );
 }

 #[test]
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 659fbd255c168..33daaa2f73537 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -1056,7 +1056,7 @@ fn nonnull_tagged_pointer_with_provenance() {

     /// Consume this tagged pointer and produce the data it carries.
     pub fn tag(&self) -> usize {
-        self.0.addr().get() & Self::DATA_MASK
+        self.0.bare_addr().get() & Self::DATA_MASK
     }

     /// Update the data this tagged pointer carries to a new value.
diff --git a/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs b/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs
index 5b4615ce51d79..eb2007ec2e4ab 100644
--- a/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs
@@ -83,12 +83,12 @@ where
     #[inline]
     fn simd_eq(self, other: Self) -> Self::Mask {
-        self.addr().simd_eq(other.addr())
+        self.bare_addr().simd_eq(other.bare_addr())
     }

     #[inline]
     fn simd_ne(self, other: Self) -> Self::Mask {
-        self.addr().simd_ne(other.addr())
+        self.bare_addr().simd_ne(other.bare_addr())
     }
 }

@@ -100,11 +100,11 @@ where
     #[inline]
     fn simd_eq(self, other: Self) -> Self::Mask {
-        self.addr().simd_eq(other.addr())
+        self.bare_addr().simd_eq(other.bare_addr())
     }

     #[inline]
     fn simd_ne(self, other: Self) -> Self::Mask {
-        self.addr().simd_ne(other.addr())
+        self.bare_addr().simd_ne(other.bare_addr())
     }
 }
diff --git a/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs b/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs
index 899f00a831641..cc0a88b6fe965 100644
--- a/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs
@@ -224,22 +224,22 @@ where
 {
     #[inline]
     fn simd_lt(self, other: Self) -> Self::Mask {
-        self.addr().simd_lt(other.addr())
+        self.bare_addr().simd_lt(other.bare_addr())
     }

     #[inline]
     fn simd_le(self, other: Self) -> Self::Mask {
-        self.addr().simd_le(other.addr())
+        self.bare_addr().simd_le(other.bare_addr())
     }

     #[inline]
     fn simd_gt(self, other: Self) -> Self::Mask {
-        self.addr().simd_gt(other.addr())
+        self.bare_addr().simd_gt(other.bare_addr())
     }

     #[inline]
     fn simd_ge(self, other: Self) -> Self::Mask {
-        self.addr().simd_ge(other.addr())
+        self.bare_addr().simd_ge(other.bare_addr())
     }
 }

@@ -274,22 +274,22 @@ where
 {
     #[inline]
     fn simd_lt(self, other: Self) -> Self::Mask {
-        self.addr().simd_lt(other.addr())
+        self.bare_addr().simd_lt(other.bare_addr())
     }

     #[inline]
     fn simd_le(self, other: Self) -> Self::Mask {
-        self.addr().simd_le(other.addr())
+        self.bare_addr().simd_le(other.bare_addr())
     }

     #[inline]
     fn simd_gt(self, other: Self) -> Self::Mask {
-        self.addr().simd_gt(other.addr())
+        self.bare_addr().simd_gt(other.bare_addr())
     }

     #[inline]
     fn simd_ge(self, other: Self) -> Self::Mask {
-        self.addr().simd_ge(other.addr())
+        self.bare_addr().simd_ge(other.bare_addr())
     }
 }
diff --git a/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs b/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs
index e217d1c8c87ca..ba1e0665639b6 100644
--- a/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs
@@ -39,8 +39,8 @@ pub trait SimdConstPtr: Copy + Sealed {
     /// This method semantically discards *provenance* and
     /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
     ///
-    /// Equivalent to calling [`pointer::addr`] on each element.
-    fn addr(self) -> Self::Usize;
+    /// Equivalent to calling [`pointer::bare_addr`] on each element.
+    fn bare_addr(self) -> Self::Usize;

     /// Creates a new pointer with the given address.
     ///
@@ -111,7 +111,7 @@ where
     }

     #[inline]
-    fn addr(self) -> Self::Usize {
+    fn bare_addr(self) -> Self::Usize {
         // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
         // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
         // provenance).
@@ -126,7 +126,7 @@ where
         // a wrapping_offset, so we can emulate it as such. This should properly
         // restore pointer provenance even under today's compiler.
         self.cast::<u8>()
-            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+            .wrapping_offset(addr.cast::<isize>() - self.bare_addr().cast::<isize>())
             .cast()
     }
diff --git a/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs b/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs
index 5cb27af4fdeba..c8170a6064825 100644
--- a/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs
@@ -36,8 +36,8 @@ pub trait SimdMutPtr: Copy + Sealed {
     /// This method discards pointer semantic metadata, so the result cannot be
     /// directly cast into a valid pointer.
     ///
-    /// Equivalent to calling [`pointer::addr`] on each element.
-    fn addr(self) -> Self::Usize;
+    /// Equivalent to calling [`pointer::bare_addr`] on each element.
+    fn bare_addr(self) -> Self::Usize;

     /// Creates a new pointer with the given address.
     ///
@@ -108,7 +108,7 @@ where
     }

     #[inline]
-    fn addr(self) -> Self::Usize {
+    fn bare_addr(self) -> Self::Usize {
         // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
         // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
         // provenance).
@@ -123,7 +123,7 @@ where
         // a wrapping_offset, so we can emulate it as such. This should properly
         // restore pointer provenance even under today's compiler.
         self.cast::<u8>()
-            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+            .wrapping_offset(addr.cast::<isize>() - self.bare_addr().cast::<isize>())
             .cast()
     }
diff --git a/library/portable-simd/crates/core_simd/tests/pointers.rs b/library/portable-simd/crates/core_simd/tests/pointers.rs
index b9f32d16e01d1..acaeb820beb35 100644
--- a/library/portable-simd/crates/core_simd/tests/pointers.rs
+++ b/library/portable-simd/crates/core_simd/tests/pointers.rs
@@ -16,10 +16,10 @@ macro_rules! common_tests {
             );
         }

-        fn addr() {
+        fn bare_addr() {
             test_helpers::test_unary_elementwise(
-                &Simd::<*$constness u32, LANES>::addr,
-                &<*$constness u32>::addr,
+                &Simd::<*$constness u32, LANES>::bare_addr,
+                &<*$constness u32>::bare_addr,
                 &|_| true,
             );
         }
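The portable-simd change is the same rename, element-wise. A sketch, not part of the patch, assuming nightly `portable_simd` plus the patched method name (`SimdConstPtr::bare_addr` comes in via the simd prelude):

```rust
#![feature(portable_simd, strict_provenance)]

use std::simd::prelude::*;

fn main() {
    let data = [1u32, 2, 3, 4];
    let ptrs = Simd::from_array([
        &data[0] as *const u32,
        &data[1] as *const u32,
        &data[2] as *const u32,
        &data[3] as *const u32,
    ]);

    // Element-wise `bare_addr`, as renamed above: one address per lane.
    let addrs = ptrs.bare_addr().to_array();

    // Consecutive `u32`s sit four bytes apart.
    assert_eq!(addrs[1] - addrs[0], 4);
}
```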
diff --git a/library/proc_macro/src/bridge/arena.rs b/library/proc_macro/src/bridge/arena.rs
index f81f2152cd046..f89e90ca18c44 100644
--- a/library/proc_macro/src/bridge/arena.rs
+++ b/library/proc_macro/src/bridge/arena.rs
@@ -72,9 +72,9 @@ impl Arena {
     /// chunk. Returns `None` if there is no free space left to satisfy the
     /// request.
     fn alloc_raw_without_grow(&self, bytes: usize) -> Option<&mut [MaybeUninit<u8>]> {
-        let start = self.start.get().addr();
+        let start = self.start.get().bare_addr();
         let old_end = self.end.get();
-        let end = old_end.addr();
+        let end = old_end.bare_addr();

         let new_end = end.checked_sub(bytes)?;
         if start <= new_end {
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 475b3e7eb9312..479a8c45a211c 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -333,7 +333,7 @@ impl Backtrace {
                 frame: RawFrame::Actual(frame.clone()),
                 symbols: Vec::new(),
             });
-            if frame.symbol_address().addr() == ip && actual_start.is_none() {
+            if frame.symbol_address().bare_addr() == ip && actual_start.is_none() {
                 actual_start = Some(frames.len());
             }
             true
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 6f8d5e3777568..903ec3a21d9aa 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -144,7 +144,7 @@ impl Repr {
         let p = Box::into_raw(b).cast::<u8>();
         // Should only be possible if an allocator handed out a pointer with
         // wrong alignment.
-        debug_assert_eq!(p.addr() & TAG_MASK, 0);
+        debug_assert_eq!(p.bare_addr() & TAG_MASK, 0);
         // Note: We know `TAG_CUSTOM <= size_of::<usize>()` (static_assert at
         // end of file), and both the start and end of the expression must be
         // valid without address space wraparound due to `Box`'s semantics.
@@ -252,7 +252,7 @@ unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
 where
     F: FnOnce(*mut Custom) -> C,
 {
-    let bits = ptr.as_ptr().addr();
+    let bits = ptr.as_ptr().bare_addr();
     match bits & TAG_MASK {
         TAG_OS => {
             let code = ((bits as i64) >> 32) as RawOsError;
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 9757653e02c06..b843a5e0bafb2 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -20,8 +20,8 @@ mod libc {

 fn sun_path_offset(addr: &libc::sockaddr_un) -> usize {
     // Work with an actual instance of the type since using a null pointer is UB
-    let base = (addr as *const libc::sockaddr_un).addr();
-    let path = core::ptr::addr_of!(addr.sun_path).addr();
+    let base = (addr as *const libc::sockaddr_un).bare_addr();
+    let path = core::ptr::addr_of!(addr.sun_path).bare_addr();
     path - base
 }
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 56ea51226f9d5..5d5c9e05789c3 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -1482,8 +1482,8 @@ impl PathBuf {
         };

         // truncate until right after the file stem
-        let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
-        let start = self.inner.as_encoded_bytes().as_ptr().addr();
+        let end_file_stem = file_stem[file_stem.len()..].as_ptr().bare_addr();
+        let start = self.inner.as_encoded_bytes().as_ptr().bare_addr();
         let v = self.as_mut_vec();
         v.truncate(end_file_stem.wrapping_sub(start));
diff --git a/library/std/src/sync/mpmc/waker.rs b/library/std/src/sync/mpmc/waker.rs
index 9aab1b9417edb..e5a072b06d3f8 100644
--- a/library/std/src/sync/mpmc/waker.rs
+++ b/library/std/src/sync/mpmc/waker.rs
@@ -206,5 +206,5 @@ pub fn current_thread_id() -> usize {
     // `u8` is not drop so this variable will be available during thread destruction,
     // whereas `thread::current()` would not be
     thread_local! { static DUMMY: u8 = 0 }
-    DUMMY.with(|x| (x as *const u8).addr())
+    DUMMY.with(|x| (x as *const u8).bare_addr())
 }
diff --git a/library/std/src/sync/reentrant_lock.rs b/library/std/src/sync/reentrant_lock.rs
index 9a44998ebf644..a6ab36ccccead 100644
--- a/library/std/src/sync/reentrant_lock.rs
+++ b/library/std/src/sync/reentrant_lock.rs
@@ -316,5 +316,5 @@ impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
 pub(crate) fn current_thread_unique_ptr() -> usize {
     // Use a non-drop type to make sure it's still available during thread destruction.
     thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
+    X.with(|x| <*const _>::bare_addr(x))
 }
diff --git a/library/std/src/sys/locks/condvar/xous.rs b/library/std/src/sys/locks/condvar/xous.rs
index 0e51449e0afa4..6f0b68ccb47ea 100644
--- a/library/std/src/sys/locks/condvar/xous.rs
+++ b/library/std/src/sys/locks/condvar/xous.rs
@@ -85,7 +85,7 @@ impl Condvar {
     }

     fn index(&self) -> usize {
-        core::ptr::from_ref(self).addr()
+        core::ptr::from_ref(self).bare_addr()
     }

     /// Unlock the given Mutex and wait for the notification. Wait at most
diff --git a/library/std/src/sys/locks/mutex/xous.rs b/library/std/src/sys/locks/mutex/xous.rs
index a8c9518ff0bcf..3b1d2fe6d79c4 100644
--- a/library/std/src/sys/locks/mutex/xous.rs
+++ b/library/std/src/sys/locks/mutex/xous.rs
@@ -29,7 +29,7 @@ impl Mutex {
     }

     fn index(&self) -> usize {
-        core::ptr::from_ref(self).addr()
+        core::ptr::from_ref(self).bare_addr()
     }

     #[inline]
diff --git a/library/std/src/sys/locks/rwlock/queue.rs b/library/std/src/sys/locks/rwlock/queue.rs
index dce966086b8ff..6d4cbe4f50860 100644
--- a/library/std/src/sys/locks/rwlock/queue.rs
+++ b/library/std/src/sys/locks/rwlock/queue.rs
@@ -137,14 +137,14 @@ const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);
 #[inline]
 fn write_lock(state: State) -> Option<State> {
     let state = state.wrapping_byte_add(LOCKED);
-    if state.addr() & LOCKED == LOCKED { Some(state) } else { None }
+    if state.bare_addr() & LOCKED == LOCKED { Some(state) } else { None }
 }

 /// Marks the state as read-locked, if possible.
 #[inline]
 fn read_lock(state: State) -> Option<State> {
-    if state.addr() & QUEUED == 0 && state.addr() != LOCKED {
-        Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED))
+    if state.bare_addr() & QUEUED == 0 && state.bare_addr() != LOCKED {
+        Some(without_provenance_mut(state.bare_addr().checked_add(SINGLE)? | LOCKED))
     } else {
         None
     }
@@ -303,7 +303,7 @@ impl RwLock {
         // "ldseta" on modern AArch64), and therefore is more efficient than
         // `fetch_update(lock(true))`, which can spuriously fail if a new node
         // is appended to the queue.
-        self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0
+        self.state.fetch_or(LOCKED, Acquire).bare_addr() & LOCKED == 0
     }

     #[inline]
@@ -326,7 +326,7 @@ impl RwLock {
                     Ok(_) => return,
                     Err(new) => state = new,
                 }
-            } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT {
+            } else if state.bare_addr() & QUEUED == 0 && count < SPIN_COUNT {
                 // If the lock is not available and no threads are queued, spin
                 // for a while, using exponential backoff to decrease cache
                 // contention.
@@ -346,10 +346,10 @@ impl RwLock {
             node.next.0 = AtomicPtr::new(state.mask(MASK).cast());
             node.prev = AtomicLink::new(None);
             let mut next = ptr::from_ref(&node)
-                .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED))
+                .map_addr(|addr| addr | QUEUED | (state.bare_addr() & LOCKED))
                 as State;

-            if state.addr() & QUEUED == 0 {
+            if state.bare_addr() & QUEUED == 0 {
                 // If this is the first node in the queue, set the tail field to
                 // the node itself to ensure there is a current `tail` field in
                 // the queue (invariants 1 and 2). This needs to use `set` to
@@ -378,7 +378,7 @@ impl RwLock {

                 // If the current thread locked the queue, unlock it again,
                 // linking it in the process.
-                if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED {
+                if state.bare_addr() & (QUEUE_LOCKED | QUEUED) == QUEUED {
                     unsafe {
                         self.unlock_queue(next);
                     }
@@ -403,8 +403,8 @@ impl RwLock {
     #[inline]
     pub unsafe fn read_unlock(&self) {
         match self.state.fetch_update(Release, Acquire, |state| {
-            if state.addr() & QUEUED == 0 {
-                let count = state.addr() - (SINGLE | LOCKED);
+            if state.bare_addr() & QUEUED == 0 {
+                let count = state.bare_addr() - (SINGLE | LOCKED);
                 Some(if count > 0 { without_provenance_mut(count | LOCKED) } else { UNLOCKED })
             } else {
                 None
@@ -431,7 +431,7 @@ impl RwLock {
         // The lock count is stored in the `next` field of `tail`.
         // Decrement it, making sure to observe all changes made to the queue
         // by the other lock owners by using acquire-release ordering.
-        let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0;
+        let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).bare_addr() - SINGLE == 0;
         if was_last {
             // SAFETY:
             // Other threads cannot read-lock while threads are queued. Also,
@@ -464,7 +464,7 @@ impl RwLock {
             match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) {
                 // The queue lock was acquired. Release it, waking up the next
                 // waiter in the process.
-                Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe {
+                Ok(_) if state.bare_addr() & QUEUE_LOCKED == 0 => unsafe {
                     return self.unlock_queue(next);
                 },
                 // Another thread already holds the queue lock, leave waking up
@@ -481,12 +481,12 @@ impl RwLock {
     /// # Safety
     /// The queue lock must be held by the current thread.
     unsafe fn unlock_queue(&self, mut state: State) {
-        debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED);
+        debug_assert_eq!(state.bare_addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED);

         loop {
             let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) };

-            if state.addr() & LOCKED == LOCKED {
+            if state.bare_addr() & LOCKED == LOCKED {
                 // Another thread has locked the lock. Leave waking up waiters
                 // to them by releasing the queue lock.
                 match self.state.compare_exchange_weak(
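The rwlock above packs flags and either a reader count or a queue pointer into one `State` pointer; `bare_addr` fits here because the flag checks never need to reconstruct a pointer from the integer. A sketch with the constants reduced to hypothetical stand-ins (the real values differ), not part of the patch:

```rust
#![feature(strict_provenance)]

use std::ptr::without_provenance_mut;

// Hypothetical constants mirroring the rwlock above: low bits of the state
// pointer hold flags; the rest is a reader count or a queue pointer.
const LOCKED: usize = 1;
const QUEUED: usize = 2;
const SINGLE: usize = 4;

type State = *mut ();

fn main() {
    // A read-locked state with one reader: no queue, so the "pointer" is
    // just packed integers and deliberately carries no provenance.
    let state: State = without_provenance_mut(SINGLE | LOCKED);

    assert_eq!(state.bare_addr() & LOCKED, LOCKED);
    assert_eq!(state.bare_addr() & QUEUED, 0);

    // Releasing the single reader leaves the unlocked count of zero.
    let count = state.bare_addr() - (SINGLE | LOCKED);
    assert_eq!(count, 0);
}
```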
diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
index f99cea360f1f4..eed4557a07e66 100644
--- a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
@@ -406,8 +406,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     assert!(is_enclave_range(src, len));
     assert!(is_user_range(dst, len));
     assert!(len < isize::MAX as usize);
-    assert!(!src.addr().overflowing_add(len).1);
-    assert!(!dst.addr().overflowing_add(len).1);
+    assert!(!src.bare_addr().overflowing_add(len).1);
+    assert!(!dst.bare_addr().overflowing_add(len).1);

     unsafe {
         let (len1, len2, len3) = u64_align_to_guaranteed(dst, len);
diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs
index 2af6382f3daee..b66f1ec40ca6b 100644
--- a/library/std/src/sys/pal/unix/thread.rs
+++ b/library/std/src/sys/pal/unix/thread.rs
@@ -795,10 +795,10 @@ pub mod guard {
             let stack_ptr = current_stack.ss_sp;
             let stackaddr = if libc::pthread_main_np() == 1 {
                 // main thread
-                stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
+                stack_ptr.bare_addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
             } else {
                 // new thread
-                stack_ptr.addr() - current_stack.ss_size
+                stack_ptr.bare_addr() - current_stack.ss_size
             };
             Some(stack_ptr.with_addr(stackaddr))
         }
@@ -837,7 +837,7 @@ pub mod guard {
             let page_size = PAGE_SIZE.load(Ordering::Relaxed);
             assert!(page_size != 0);
             let stackptr = get_stack_start()?;
-            let stackaddr = stackptr.addr();
+            let stackaddr = stackptr.bare_addr();

             // Ensure stackaddr is page aligned! A parent process might
             // have reset RLIMIT_STACK to be non-page aligned. The
@@ -869,7 +869,7 @@ pub mod guard {
                 // faulting, so our handler can report "stack overflow", and
                 // trust that the kernel's own stack guard will work.
                 let stackptr = get_stack_start_aligned()?;
-                let stackaddr = stackptr.addr();
+                let stackaddr = stackptr.bare_addr();
                 Some(stackaddr - page_size..stackaddr)
             } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                 // For the main thread, the musl's pthread_attr_getstack
@@ -883,7 +883,7 @@ pub mod guard {
                 // ourselves, FreeBSD's guard page moves upwards. So we'll just use
                 // the builtin guard page.
                 let stackptr = get_stack_start_aligned()?;
-                let guardaddr = stackptr.addr();
+                let guardaddr = stackptr.bare_addr();
                 // Technically the number of guard pages is tunable and controlled
                 // by the security.bsd.stack_guard_page sysctl.
                 // By default it is 1, checking once is enough since it is
@@ -919,7 +919,7 @@ pub mod guard {
                 // faulting, so our handler can report "stack overflow", and
                 // trust that the kernel's own stack guard will work.
                 let stackptr = get_stack_start_aligned()?;
-                let stackaddr = stackptr.addr();
+                let stackaddr = stackptr.bare_addr();
                 Some(stackaddr - page_size..stackaddr)
             } else {
                 // Reallocate the last page of the stack.
@@ -948,7 +948,7 @@ pub mod guard {
                 panic!("failed to protect the guard page: {}", io::Error::last_os_error());
             }

-            let guardaddr = stackptr.addr();
+            let guardaddr = stackptr.bare_addr();

             Some(guardaddr..guardaddr + page_size)
         }
@@ -957,7 +957,7 @@ pub mod guard {
     #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
    pub unsafe fn current() -> Option<Range<usize>> {
         let stackptr = get_stack_start()?;
-        let stackaddr = stackptr.addr();
+        let stackaddr = stackptr.bare_addr();
         Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
     }

@@ -995,7 +995,7 @@ pub mod guard {
             let mut size = 0;
             assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);

-            let stackaddr = stackptr.addr();
+            let stackaddr = stackptr.bare_addr();
             ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                 Some(stackaddr - guardsize..stackaddr)
             } else if cfg!(all(target_os = "linux", target_env = "musl")) {
diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs
index 48cc8633e93d2..21c9181eb7cf8 100644
--- a/library/std/src/sys/pal/unix/weak.rs
+++ b/library/std/src/sys/pal/unix/weak.rs
@@ -93,7 +93,7 @@ impl<F> DlsymWeak<F> {
         // Relaxed is fine here because we fence before reading through the
         // pointer (see the comment below).
         match self.func.load(Ordering::Relaxed) {
-            func if func.addr() == 1 => self.initialize(),
+            func if func.bare_addr() == 1 => self.initialize(),
             func if func.is_null() => None,
             func => {
                 let func = mem::transmute_copy::<*mut libc::c_void, F>(&func);
diff --git a/library/std/src/sys/pal/windows/alloc.rs b/library/std/src/sys/pal/windows/alloc.rs
index 270eca37b14d6..d5c90f39756e5 100644
--- a/library/std/src/sys/pal/windows/alloc.rs
+++ b/library/std/src/sys/pal/windows/alloc.rs
@@ -162,7 +162,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {

         // Create a correctly aligned pointer offset from the start of the allocated block,
         // and write a header before it.
-        let offset = layout.align() - (ptr.addr() & (layout.align() - 1));
+        let offset = layout.align() - (ptr.bare_addr() & (layout.align() - 1));
         // SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated
         // block is `layout.align() + layout.size()`. `aligned` will thus be a correctly aligned
        // pointer inside the allocated block with at least `layout.size()` bytes after it and at
diff --git a/library/std/src/sys/pal/windows/mod.rs b/library/std/src/sys/pal/windows/mod.rs
index a53c4034d0685..6cbd891d24dc1 100644
--- a/library/std/src/sys/pal/windows/mod.rs
+++ b/library/std/src/sys/pal/windows/mod.rs
@@ -153,7 +153,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
        ($($n:literal,)+) => {
            $(
                if start[$n] == needle {
-                    return Some(((&start[$n] as *const u16).addr() - ptr.addr()) / 2);
+                    return Some(((&start[$n] as *const u16).bare_addr() - ptr.bare_addr()) / 2);
                }
            )+
        }
@@ -166,7 +166,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {

     for c in start {
         if *c == needle {
-            return Some(((c as *const u16).addr() - ptr.addr()) / 2);
+            return Some(((c as *const u16).bare_addr() - ptr.bare_addr()) / 2);
         }
     }
     None
diff --git a/library/std/src/sys/pal/xous/thread_parking.rs b/library/std/src/sys/pal/xous/thread_parking.rs
index 0bd0462d77d35..da15438e55678 100644
--- a/library/std/src/sys/pal/xous/thread_parking.rs
+++ b/library/std/src/sys/pal/xous/thread_parking.rs
@@ -22,7 +22,7 @@ impl Parker {
     }

     fn index(&self) -> usize {
-        ptr::from_ref(self).addr()
+        ptr::from_ref(self).bare_addr()
     }

     pub unsafe fn park(self: Pin<&Self>) {
diff --git a/library/std/src/sys/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs
index a78084de0faef..85336d2b3c336 100644
--- a/library/std/src/sys/personality/dwarf/eh.rs
+++ b/library/std/src/sys/personality/dwarf/eh.rs
@@ -111,12 +111,12 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result<EHAction, ()> {
         // SjLj version:
         // The "IP" is an index into the call-site table, with two exceptions:
         // -1 means 'no-action', and 0 means 'terminate'.
-        match ip.addr() as isize {
+        match ip.bare_addr() as isize {
             -1 => return Ok(EHAction::None),
             0 => return Ok(EHAction::Terminate),
             _ => (),
         }
-        let mut idx = ip.addr();
+        let mut idx = ip.bare_addr();
         loop {
             let cs_lpad = reader.read_uleb128();
             let cs_action_entry = reader.read_uleb128();
@@ -230,8 +230,9 @@ unsafe fn read_encoded_pointer(
         DW_EH_PE_datarel => (*context.get_data_start)(),
         // aligned means the value is aligned to the size of a pointer
         DW_EH_PE_aligned => {
-            reader.ptr =
-                reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<*const u8>())?);
+            reader.ptr = reader
+                .ptr
+                .with_addr(round_up(reader.ptr.bare_addr(), mem::size_of::<*const u8>())?);
             core::ptr::null()
         }
         _ => return Err(()),
diff --git a/library/std/src/sys/thread_local/os_local.rs b/library/std/src/sys/thread_local/os_local.rs
index 3edffd7e4437c..f626ffe2f26c5 100644
--- a/library/std/src/sys/thread_local/os_local.rs
+++ b/library/std/src/sys/thread_local/os_local.rs
@@ -115,7 +115,7 @@ impl<T: 'static> Key<T> {
     pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
         // SAFETY: See the documentation for this method.
         let ptr = unsafe { self.os.get() as *mut Value<T> };
-        if ptr.addr() > 1 {
+        if ptr.bare_addr() > 1 {
             // SAFETY: the check ensured the pointer is safe (its destructor
             // is not running) + it is coming from a trusted source (self).
             if let Some(ref value) = unsafe { (*ptr).inner.get() } {
@@ -134,7 +134,7 @@ impl<T: 'static> Key<T> {
         // SAFETY: No mutable references are ever handed out meaning getting
         // the value is ok.
         let ptr = unsafe { self.os.get() as *mut Value<T> };
-        if ptr.addr() == 1 {
+        if ptr.bare_addr() == 1 {
             // destructor is running
             return None;
         }
diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs
index 730cdb768bd27..38c751ded315a 100644
--- a/library/std/src/sys_common/once/queue.rs
+++ b/library/std/src/sys_common/once/queue.rs
@@ -119,12 +119,12 @@ impl Once {
         // operations visible to us, and, this being a fast path, weaker
         // ordering helps with performance. This `Acquire` synchronizes with
         // `Release` operations on the slow path.
-        self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
+        self.state_and_queue.load(Ordering::Acquire).bare_addr() == COMPLETE
     }

     #[inline]
     pub(crate) fn state(&mut self) -> ExclusiveState {
-        match self.state_and_queue.get_mut().addr() {
+        match self.state_and_queue.get_mut().bare_addr() {
             INCOMPLETE => ExclusiveState::Incomplete,
             POISONED => ExclusiveState::Poisoned,
             COMPLETE => ExclusiveState::Complete,
@@ -148,7 +148,7 @@ impl Once {
     pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) {
         let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
         loop {
-            match state_and_queue.addr() {
+            match state_and_queue.bare_addr() {
                 COMPLETE => break,
                 POISONED if !ignore_poisoning => {
                     // Panic to propagate the poison.
@@ -176,7 +176,7 @@ impl Once {
                     // poisoned or not.
                     let init_state = public::OnceState {
                         inner: OnceState {
-                            poisoned: state_and_queue.addr() == POISONED,
+                            poisoned: state_and_queue.bare_addr() == POISONED,
                             set_state_on_drop_to: Cell::new(ptr::without_provenance_mut(COMPLETE)),
                         },
                     };
@@ -187,7 +187,7 @@ impl Once {
                 _ => {
                     // All other values must be RUNNING with possibly a
                     // pointer to the waiter queue in the more significant bits.
-                    assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
+                    assert!(state_and_queue.bare_addr() & STATE_MASK == RUNNING);
                     wait(&self.state_and_queue, state_and_queue);
                     state_and_queue = self.state_and_queue.load(Ordering::Acquire);
                 }
@@ -202,7 +202,7 @@ fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
     loop {
         // Don't queue this thread if the status is no longer running,
         // otherwise we will not be woken up.
-        if current_state.addr() & STATE_MASK != RUNNING {
+        if current_state.bare_addr() & STATE_MASK != RUNNING {
             return;
         }

@@ -210,7 +210,7 @@ fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
         let node = Waiter {
             thread: Cell::new(Some(thread::current())),
             signaled: AtomicBool::new(false),
-            next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
+            next: current_state.with_addr(current_state.bare_addr() & !STATE_MASK) as *const Waiter,
         };
         let me = core::ptr::addr_of!(node) as *const Masked as *mut Masked;

@@ -218,7 +218,7 @@ fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
         // that another thread didn't just replace the head of the linked list.
         let exchange_result = state_and_queue.compare_exchange(
             current_state,
-            me.with_addr(me.addr() | RUNNING),
+            me.with_addr(me.bare_addr() | RUNNING),
             Ordering::Release,
             Ordering::Relaxed,
         );
@@ -257,7 +257,7 @@ impl Drop for WaiterQueue<'_> {
         self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);

         // We should only ever see an old state which was RUNNING.
-        assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
+        assert_eq!(state_and_queue.bare_addr() & STATE_MASK, RUNNING);

         // Walk the entire linked list of waiters and wake them up (in lifo
         // order, last to register is first to wake up).
@@ -266,8 +266,8 @@ impl Drop for WaiterQueue<'_> {
             // free `node` if there happens to be has a spurious wakeup.
             // So we have to take out the `thread` field and copy the pointer to
             // `next` first.
-            let mut queue =
-                state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
+            let mut queue = state_and_queue.with_addr(state_and_queue.bare_addr() & !STATE_MASK)
+                as *const Waiter;
             while !queue.is_null() {
                 let next = (*queue).next;
                 let thread = (*queue).thread.take().unwrap();
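`Once` uses the same packing trick: low bits for the state, upper bits for the waiter-queue pointer. A sketch, not part of the patch, with constants that merely mirror the ones above:

```rust
#![feature(strict_provenance)]

use std::ptr;

// Constants mirroring `Once` above: two low bits encode the state, the
// remaining bits hold the waiter-queue pointer.
const RUNNING: usize = 0x2;
const COMPLETE: usize = 0x3;
const STATE_MASK: usize = 0b11;

fn main() {
    // Pure states carry no provenance at all.
    let state: *mut () = ptr::without_provenance_mut(COMPLETE);
    assert_eq!(state.bare_addr(), COMPLETE);

    // A queued state keeps the queue head's provenance; only the address
    // bits are masked in and out around it. `u64` is 8-aligned, so the
    // low bits of its address are free.
    let node = 0u64;
    let head: *const u64 = &node;
    let queued = head.map_addr(|a| a | RUNNING);
    assert_eq!(queued.bare_addr() & STATE_MASK, RUNNING);
    assert_eq!(queued.map_addr(|a| a & !STATE_MASK), head);
}
```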
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
index 0466743966034..a02f573b955d0 100644
--- a/library/std/src/sys_common/thread_parking/id.rs
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -62,7 +62,7 @@ impl Parker {
         // The state must be reset with acquire ordering to ensure that all
         // calls to `unpark` synchronize with this thread.
         while self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_err() {
-            park(self.state.as_ptr().addr());
+            park(self.state.as_ptr().bare_addr());
         }
     }
 }
@@ -72,7 +72,7 @@ impl Parker {
         let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
         if state == PARKED {
-            park_timeout(dur, self.state.as_ptr().addr());
+            park_timeout(dur, self.state.as_ptr().bare_addr());
             // Swap to ensure that we observe all state changes with acquire
             // ordering.
             self.state.swap(EMPTY, Acquire);
@@ -94,7 +94,7 @@ impl Parker {
         // and terminated before this call is made. This call then returns an
         // error or wakes up an unrelated thread. The platform API and
         // environment does allow this, however.
-        unpark(tid, self.state.as_ptr().addr());
+        unpark(tid, self.state.as_ptr().bare_addr());
     }
 }
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index 527c408c89edd..c1aa4e577f28d 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -238,7 +238,7 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe
     pub unsafe fn _Unwind_SetIP(ctx: *mut _Unwind_Context, value: _Unwind_Word) {
         // Propagate thumb bit to instruction pointer
-        let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG).addr() & 1;
+        let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG).bare_addr() & 1;
         let value = value.map_addr(|v| v | thumb_state);
         _Unwind_SetGR(ctx, UNWIND_IP_REG, value);
     }
diff --git a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
index bc5dd53dcf5e4..727f9c1057e5b 100644
--- a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
+++ b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
@@ -22,6 +22,6 @@ fn main() {
     let ptr2 = &1u8 as *const u8;
     unsafe {
         // Two pointers with the same address but different provenance.
diff --git a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
index bc5dd53dcf5e4..727f9c1057e5b 100644
--- a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
+++ b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs
@@ -22,6 +22,6 @@ fn main() {
     let ptr2 = &1u8 as *const u8;
     unsafe {
         // Two pointers with the same address but different provenance.
-        deref(ptr1, ptr2.with_addr(ptr1.addr()));
+        deref(ptr1, ptr2.with_addr(ptr1.bare_addr()));
     }
 }
diff --git a/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr b/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr
index 6b1c2941c075c..991e2853dcfc2 100644
--- a/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr
+++ b/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr
@@ -11,8 +11,8 @@ LL |         let _val = *left_ptr;
 note: inside `main`
   --> $DIR/provenance_transmute.rs:LL:CC
    |
-LL |         deref(ptr1, ptr2.with_addr(ptr1.addr()));
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL |         deref(ptr1, ptr2.with_addr(ptr1.bare_addr()));
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
diff --git a/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs b/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs
index 20fd330699890..5c53dc652bb76 100644
--- a/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs
+++ b/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs
@@ -5,7 +5,7 @@ fn main() {
     let x: i32 = 3;
     let x_ptr = &x as *const i32;

-    let x_usize: usize = x_ptr.addr();
+    let x_usize: usize = x_ptr.bare_addr();
     // Cast back an address that did *not* get exposed.
     let ptr = std::ptr::from_exposed_addr::<i32>(x_usize);
     assert_eq!(unsafe { *ptr }, 3); //~ ERROR: is a dangling pointer
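The two Miri tests above pin down the boundary the reworded lint messages describe: a bare address cannot soundly be cast back to a pointer, while an exposed one can. A sketch of both round trips, using the API names current as of this PR (`expose_addr`/`from_exposed_addr`, and `bare_addr()` for the renamed `addr()`):

```rust
fn main() {
    let x: i32 = 3;
    let y: i32 = 4;

    // Bare address: `bare_addr()` deliberately forgets the provenance, so a
    // pointer rebuilt from it is dangling (what ptr_int_unexposed.rs checks).
    let bare = (&x as *const i32).bare_addr();
    let _dangling = std::ptr::from_exposed_addr::<i32>(bare); // do not deref

    // Exposed address: the provenance is recorded and the round trip works.
    let exposed = (&y as *const i32).expose_addr();
    let q = std::ptr::from_exposed_addr::<i32>(exposed);
    assert_eq!(unsafe { *q }, 4);
}
```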
diff --git a/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs b/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs
index e075db66039bc..ae74c16747815 100644
--- a/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs
+++ b/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs
@@ -17,8 +17,8 @@ fn main() {
     let _val = unsafe { buffer.read() };

     // Let's find a place to promise alignment 8.
-    let align8 = if buffer.addr() % 8 == 0 { buffer } else { buffer.wrapping_add(1) };
-    assert!(align8.addr() % 8 == 0);
+    let align8 = if buffer.bare_addr() % 8 == 0 { buffer } else { buffer.wrapping_add(1) };
+    assert!(align8.bare_addr() % 8 == 0);
     unsafe { utils::miri_promise_symbolic_alignment(align8.cast(), 8) };
     // Promising the alignment down to 1 *again* still must not hurt.
     unsafe { utils::miri_promise_symbolic_alignment(buffer.cast(), 1) };
@@ -37,8 +37,8 @@ fn main() {
     #[derive(Copy, Clone)]
     struct Align16(#[allow(dead_code)] u128);

-    let align16 = if align8.addr() % 16 == 0 { align8 } else { align8.wrapping_add(2) };
-    assert!(align16.addr() % 16 == 0);
+    let align16 = if align8.bare_addr() % 16 == 0 { align8 } else { align8.wrapping_add(2) };
+    assert!(align16.bare_addr() % 16 == 0);

     let _val = unsafe { align8.cast::<Align16>().read() };
     //~[read_unaligned_ptr]^ ERROR: accessing memory based on pointer with alignment 8, but alignment 16 is required
diff --git a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
index 5cf62995fbee2..75456bb172ea0 100644
--- a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
+++ b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
@@ -65,7 +65,7 @@ fn main() {
         // The pointer is not modified on failure, posix_memalign(3) says:
         // > On Linux (and other systems), posix_memalign() does not modify memptr on failure.
         // > A requirement standardizing this behavior was added in POSIX.1-2008 TC2.
-        assert_eq!(ptr.addr(), 0x1234567);
+        assert_eq!(ptr.bare_addr(), 0x1234567);
     }

     // Too small align (smaller than ptr)
@@ -77,6 +77,6 @@ fn main() {
         // The pointer is not modified on failure, posix_memalign(3) says:
         // > On Linux (and other systems), posix_memalign() does not modify memptr on failure.
         // > A requirement standardizing this behavior was added in POSIX.1-2008 TC2.
-        assert_eq!(ptr.addr(), 0x1234567);
+        assert_eq!(ptr.bare_addr(), 0x1234567);
     }
 }
diff --git a/src/tools/miri/tests/pass/atomic.rs b/src/tools/miri/tests/pass/atomic.rs
index dfdc9b42f81fc..d4d185d898d99 100644
--- a/src/tools/miri/tests/pass/atomic.rs
+++ b/src/tools/miri/tests/pass/atomic.rs
@@ -136,45 +136,45 @@ fn atomic_ptr() {
     let x = array.as_ptr() as *mut i32;

     let ptr = AtomicPtr::<i32>::new(ptr::null_mut());
-    assert!(ptr.load(Relaxed).addr() == 0);
+    assert!(ptr.load(Relaxed).bare_addr() == 0);
     ptr.store(ptr::without_provenance_mut(13), SeqCst);
-    assert!(ptr.swap(x, Relaxed).addr() == 13);
+    assert!(ptr.swap(x, Relaxed).bare_addr() == 13);
     unsafe { assert!(*ptr.load(Acquire) == 0) };

     // comparison ignores provenance
     assert_eq!(
         ptr.compare_exchange(
-            (&mut 0 as *mut i32).with_addr(x.addr()),
+            (&mut 0 as *mut i32).with_addr(x.bare_addr()),
             ptr::without_provenance_mut(0),
             SeqCst,
             SeqCst
         )
         .unwrap()
-        .addr(),
-        x.addr(),
+        .bare_addr(),
+        x.bare_addr(),
     );
     assert_eq!(
         ptr.compare_exchange(
-            (&mut 0 as *mut i32).with_addr(x.addr()),
+            (&mut 0 as *mut i32).with_addr(x.bare_addr()),
             ptr::without_provenance_mut(0),
             SeqCst,
             SeqCst
         )
         .unwrap_err()
-        .addr(),
+        .bare_addr(),
         0,
     );

     ptr.store(x, Relaxed);
-    assert_eq!(ptr.fetch_ptr_add(13, AcqRel).addr(), x.addr());
+    assert_eq!(ptr.fetch_ptr_add(13, AcqRel).bare_addr(), x.bare_addr());
     unsafe { assert_eq!(*ptr.load(SeqCst), 13) }; // points to index 13 now
-    assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).addr(), x.addr() + 13 * 4);
+    assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).bare_addr(), x.bare_addr() + 13 * 4);
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
-    assert_eq!(ptr.fetch_or(3, AcqRel).addr(), x.addr() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
-    assert_eq!(ptr.fetch_and(!3, AcqRel).addr(), (x.addr() + 9 * 4) | 3); // and unset them again
+    assert_eq!(ptr.fetch_or(3, AcqRel).bare_addr(), x.bare_addr() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
+    assert_eq!(ptr.fetch_and(!3, AcqRel).bare_addr(), (x.bare_addr() + 9 * 4) | 3); // and unset them again
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
-    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), x.addr() + 9 * 4);
-    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), (x.addr() + 9 * 4) ^ 0xdeadbeef);
+    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).bare_addr(), x.bare_addr() + 9 * 4);
+    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).bare_addr(), (x.bare_addr() + 9 * 4) ^ 0xdeadbeef);
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) }; // after XORing twice with the same thing, we get our ptr back
 }
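As the `// comparison ignores provenance` line in `atomic.rs` above notes, `AtomicPtr::compare_exchange` matches on the address alone. A small stand-alone sketch of that behavior, assuming this PR's `bare_addr()` rename:

```rust
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let mut value = 7i32;
    let real = &mut value as *mut i32;
    let ptr = AtomicPtr::new(real);

    // A pointer with the same bare address but unrelated provenance still
    // matches: the comparison is purely numeric.
    let lookalike = (&mut 0i32 as *mut i32).with_addr(real.bare_addr());
    let prev = ptr
        .compare_exchange(lookalike, std::ptr::null_mut(), SeqCst, SeqCst)
        .unwrap();
    assert_eq!(prev.bare_addr(), real.bare_addr());
}
```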
"test".as_bytes().as_ptr().bare_addr() } diff --git a/src/tools/miri/tests/pass/ptr_int_from_exposed.rs b/src/tools/miri/tests/pass/ptr_int_from_exposed.rs index d8d57679e6b36..eb09a0db6aa0c 100644 --- a/src/tools/miri/tests/pass/ptr_int_from_exposed.rs +++ b/src/tools/miri/tests/pass/ptr_int_from_exposed.rs @@ -52,7 +52,7 @@ fn ptr_roundtrip_null() { assert_eq!(null, 0); let x_null_ptr_copy = ptr::from_exposed_addr::(null); // just a roundtrip, so has provenance of x (angelically) - let x_ptr_copy = x_null_ptr_copy.with_addr(x_ptr.addr()); // addr of x and provenance of x + let x_ptr_copy = x_null_ptr_copy.with_addr(x_ptr.bare_addr()); // addr of x and provenance of x assert_eq!(unsafe { *x_ptr_copy }, 42); } diff --git a/src/tools/miri/tests/pass/shims/ptr_mask.rs b/src/tools/miri/tests/pass/shims/ptr_mask.rs index fb8bb6b13dbc2..273a166c1b575 100644 --- a/src/tools/miri/tests/pass/shims/ptr_mask.rs +++ b/src/tools/miri/tests/pass/shims/ptr_mask.rs @@ -7,10 +7,10 @@ fn main() { // u32 is 4 aligned, // so the lower `log2(4) = 2` bits of the address are always 0 - assert_eq!(ptr.addr() & 0b11, 0); + assert_eq!(ptr.bare_addr() & 0b11, 0); let tagged_ptr = ptr.map_addr(|a| a | 0b11); - let tag = tagged_ptr.addr() & 0b11; + let tag = tagged_ptr.bare_addr() & 0b11; let masked_ptr = tagged_ptr.mask(!0b11); assert_eq!(tag, 0b11); diff --git a/src/tools/miri/tests/pass/transmute_ptr.rs b/src/tools/miri/tests/pass/transmute_ptr.rs index ce6d86b7068a0..fc75448ada768 100644 --- a/src/tools/miri/tests/pass/transmute_ptr.rs +++ b/src/tools/miri/tests/pass/transmute_ptr.rs @@ -42,7 +42,7 @@ fn ptr_in_two_halves() { // Now target_arr[1] is a mix of the two `ptr` we had stored in `arr`. let strange_ptr = target_arr[1]; // Check that the provenance works out. - assert_eq!(*strange_ptr.with_addr(ptr.addr()), 0); + assert_eq!(*strange_ptr.with_addr(ptr.bare_addr()), 0); } } diff --git a/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs b/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs index d4a74b3d78283..e2a0f560bd60c 100644 --- a/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs +++ b/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs @@ -7,8 +7,8 @@ pub fn test(src: *const u8, dst: *const u8) -> usize { // CHECK-LABEL: @test( // CHECK-NOT: panic - let src_usize = src.addr(); - let dst_usize = dst.addr(); + let src_usize = src.bare_addr(); + let dst_usize = dst.bare_addr(); if src_usize > dst_usize { return src_usize - dst_usize; } diff --git a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr index aa151fe2d214e..c2b27d1e86d1d 100644 --- a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr +++ b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr @@ -4,16 +4,16 @@ error: under strict provenance it is considered bad style to cast pointer `*cons LL | let addr: usize = &x as *const u8 as usize; | ^^^^^^^^^^^^^^^^^^^^^^^^ | - = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead + = help: if you need to cast the address back to a pointer later, use `.expose_addr()` instead note: the lint level is defined here --> $DIR/lint-strict-provenance-lossy-casts.rs:2:9 | LL | #![deny(lossy_provenance_casts)] | ^^^^^^^^^^^^^^^^^^^^^^ -help: use `.addr()` to obtain the address of a pointer +help: use `.bare_addr()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer 
diff --git a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
index aa151fe2d214e..c2b27d1e86d1d 100644
--- a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
+++ b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
@@ -4,16 +4,16 @@ error: under strict provenance it is considered bad style to cast pointer `*cons
 LL |     let addr: usize = &x as *const u8 as usize;
    |                       ^^^^^^^^^^^^^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+   = help: if you need to cast the address back to a pointer later, use `.expose_addr()` instead
 note: the lint level is defined here
   --> $DIR/lint-strict-provenance-lossy-casts.rs:2:9
    |
 LL | #![deny(lossy_provenance_casts)]
    |         ^^^^^^^^^^^^^^^^^^^^^^
-help: use `.addr()` to obtain the address of a pointer
+help: use `.bare_addr()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-LL |     let addr: usize = (&x as *const u8).addr();
-   |                       +               ~~~~~~~~
+LL |     let addr: usize = (&x as *const u8).bare_addr();
+   |                       +               ~~~~~~~~~~~~~

error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `u32`
  --> $DIR/lint-strict-provenance-lossy-casts.rs:9:22
   |
@@ -21,31 +21,35 @@ error: under strict provenance it is considered bad style to cast pointer `*cons
 LL |     let addr_32bit = &x as *const u8 as u32;
    |                      ^^^^^^^^^^^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
-help: use `.addr()` to obtain the address of a pointer
+   = help: if you need to cast the address back to a pointer later, use `.expose_addr()` instead
+help: use `.bare_addr()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-LL |     let addr_32bit = (&x as *const u8).addr() as u32;
-   |                      +               ~~~~~~~~~~~~~~~
+LL |     let addr_32bit = (&x as *const u8).bare_addr() as u32;
+   |                      +               ~~~~~~~~~~~~~~~~~~~~

 error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `usize`
   --> $DIR/lint-strict-provenance-lossy-casts.rs:14:20
    |
 LL |     let ptr_addr = ptr as usize;
-   |                    ^^^---------
-   |                    |
-   |                    help: use `.addr()` to obtain the address of a pointer: `.addr()`
+   |                    ^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+   = help: if you need to cast the address back to a pointer later, use `.expose_addr()` instead
+help: use `.bare_addr()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
+   |
+LL |     let ptr_addr = ptr.bare_addr();
+   |                       ~~~~~~~~~~~~

 error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `u32`
   --> $DIR/lint-strict-provenance-lossy-casts.rs:16:26
    |
 LL |     let ptr_addr_32bit = ptr as u32;
-   |                          ^^^-------
-   |                          |
-   |                          help: use `.addr()` to obtain the address of a pointer: `.addr() as u32`
+   |                          ^^^^^^^^^^
+   |
+   = help: if you need to cast the address back to a pointer later, use `.expose_addr()` instead
+help: use `.bare_addr()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+LL |     let ptr_addr_32bit = ptr.bare_addr() as u32;
+   |                             ~~~~~~~~~~~~~~~~~~~

 error: aborting due to 4 previous errors
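To see what the reworded suggestions ask users to write, here is a sketch of the lint test's casts after applying the new help text (`bare_addr()` for inspect-only uses, `expose_addr()` when the integer must become a pointer again; both names as used in this PR):

```rust
fn main() {
    let x = 0u8;
    let ptr = &x as *const u8;

    // Instead of `ptr as usize`, which the lint flags as lossy:
    let addr: usize = ptr.bare_addr();
    let addr_32bit = ptr.bare_addr() as u32;

    // If the integer must be cast back to a pointer later, expose it:
    let exposed = ptr.expose_addr();
    let back = std::ptr::from_exposed_addr::<u8>(exposed);
    assert_eq!(unsafe { *back }, 0);

    let _ = (addr, addr_32bit);
}
```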
diff --git a/tests/ui/mir/alignment/i686-pc-windows-msvc.rs b/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
index 379f61ae818f2..f619092a6efe7 100644
--- a/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
+++ b/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
@@ -14,8 +14,8 @@ fn main() {
     let ptr = x.as_mut_ptr();
     unsafe {
         let misaligned = ptr.byte_add(4);
-        assert!(misaligned.addr() % 8 != 0);
-        assert!(misaligned.addr() % 4 == 0);
+        assert!(misaligned.bare_addr() % 8 != 0);
+        assert!(misaligned.bare_addr() % 4 == 0);
         *misaligned = 42;
     }
 }
diff --git a/tests/ui/structs-enums/type-sizes.rs b/tests/ui/structs-enums/type-sizes.rs
index 66f663ce0776c..447a3fb70aa20 100644
--- a/tests/ui/structs-enums/type-sizes.rs
+++ b/tests/ui/structs-enums/type-sizes.rs
@@ -319,7 +319,7 @@ pub fn main() {
     // Currently the layout algorithm will choose the latter because it doesn't attempt
     // to aggregate multiple smaller fields to move a niche before a higher-alignment one.
     let b = BoolInTheMiddle(NonZero::new(1).unwrap(), true, 0);
-    assert!(ptr::from_ref(&b.1).addr() > ptr::from_ref(&b.2).addr());
+    assert!(ptr::from_ref(&b.1).bare_addr() > ptr::from_ref(&b.2).bare_addr());

     assert_eq!(size_of::<Option<BoolInTheMiddle>>(), size_of::<BoolInTheMiddle>());

@@ -332,7 +332,8 @@ pub fn main() {
     // Neither field has a niche at the beginning so the layout algorithm should try to move niches
     // to the end which means the 8-sized field shouldn't be alignment-promoted before the 4-sized one.
     let v = ReorderEndNiche { a: EndNiche8([0; 7], false), b: MiddleNiche4(0, 0, false, 0) };
-    assert!(ptr::from_ref(&v.a).addr() > ptr::from_ref(&v.b).addr());
-
-
+    assert!(
+        ptr::from_ref(&v.a).bare_addr()
+            > ptr::from_ref(&v.b).bare_addr()
+    );
 }
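The layout asserts above compare field addresses as plain integers; the same subtraction yields a field's byte offset. A sketch with a made-up struct, cross-checked against `offset_of!` (assuming this PR's `bare_addr()` name):

```rust
use core::{mem, ptr};

// An illustrative struct; repr(Rust) may reorder `a` and `b`.
struct Pair {
    a: u8,
    b: u32,
}

fn main() {
    let v = Pair { a: 1, b: 2 };
    // Subtracting bare addresses yields the field's byte offset; the
    // integers are never turned back into pointers.
    let off_a = ptr::from_ref(&v.a).bare_addr() - ptr::from_ref(&v).bare_addr();
    let off_b = ptr::from_ref(&v.b).bare_addr() - ptr::from_ref(&v).bare_addr();
    assert_eq!(off_a, mem::offset_of!(Pair, a));
    assert_eq!(off_b, mem::offset_of!(Pair, b));
}
```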