diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 3c96290fc537e..7c7ffa3ba9080 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1477,7 +1477,7 @@ impl<T> AtomicPtr<T> {
     /// work with a deliberately misaligned pointer. In such cases, you may use
     /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
     ///
-    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// `fetch_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1494,7 +1494,7 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
     /// ```
@@ -1502,7 +1502,7 @@ impl<T> AtomicPtr<T> {
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
         self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
@@ -1518,7 +1518,7 @@ impl<T> AtomicPtr<T> {
     /// work with a deliberately misaligned pointer. In such cases, you may use
     /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
     ///
-    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
+    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
     /// ordering of this operation. All ordering modes are possible. Note that
     /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
     /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1538,7 +1538,7 @@ impl<T> AtomicPtr<T> {
     /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
     ///
     /// assert!(core::ptr::eq(
-    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
+    ///     atom.fetch_sub(1, Ordering::Relaxed),
     ///     &array[1],
     /// ));
     /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
@@ -1547,7 +1547,7 @@ impl<T> AtomicPtr<T> {
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
         self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 94b0310603bf4..bc978e7dd3114 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -131,13 +131,13 @@ fn int_max() {
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_add_null() {
     let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
+    assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
     assert_eq!(atom.load(SeqCst).addr(), 8);
 
     assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
     assert_eq!(atom.load(SeqCst).addr(), 9);
 
-    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
+    assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
     assert_eq!(atom.load(SeqCst).addr(), 1);
 
     assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
@@ -150,10 +150,10 @@ fn ptr_add_data() {
     let num = 0i64;
     let n = &num as *const i64 as *mut _;
     let atom = AtomicPtr::<i64>::new(n);
-    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
+    assert_eq!(atom.fetch_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
 
-    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
+    assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
 
     let bytes_from_n = |b| n.wrapping_byte_add(b);
diff --git a/src/tools/miri/tests/pass/atomic.rs b/src/tools/miri/tests/pass/atomic.rs
index e3d80a78916f6..bc34488ccfc4b 100644
--- a/src/tools/miri/tests/pass/atomic.rs
+++ b/src/tools/miri/tests/pass/atomic.rs
@@ -164,9 +164,9 @@ fn atomic_ptr() {
     );
     ptr.store(x, Relaxed);
 
-    assert_eq!(ptr.fetch_ptr_add(13, AcqRel).addr(), x.addr());
+    assert_eq!(ptr.fetch_add(13, AcqRel).addr(), x.addr());
     unsafe { assert_eq!(*ptr.load(SeqCst), 13) }; // points to index 13 now
-    assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).addr(), x.addr() + 13 * 4);
+    assert_eq!(ptr.fetch_sub(4, AcqRel).addr(), x.addr() + 13 * 4);
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
     assert_eq!(ptr.fetch_or(3, AcqRel).addr(), x.addr() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
     assert_eq!(ptr.fetch_and(!3, AcqRel).addr(), (x.addr() + 9 * 4) | 3); // and unset them again