From 8e72286e93cdfbaef146d9c54b1e52a4ef09b474 Mon Sep 17 00:00:00 2001
From: Jon Gjengset
Date: Fri, 10 Apr 2020 15:14:21 -0400
Subject: [PATCH] Add missing atomic operations to AtomicPtr

This adds various `fetch_` methods to `AtomicPtr` that are present on
other `Atomic*` types. This means that libraries that depend on atomic
operations on pointers do not have to cast those pointers to `usize`
and fiddle around with `AtomicUsize` instead.

Note that this patch currently implements `fetch_add` and `fetch_sub`
without considering the size of the pointer target. This is unlike
regular pointer additions and subtractions. The rationale for this is
that for atomic operations, the user may indeed wish to truly increment
by 1, which is difficult if all deltas are interpreted in increments of
the type's size.

This patch effectively resurrects the change from #10154. Based on
https://github.com/rust-lang/rust/issues/12949#issuecomment-37774119,
the rationale for not making the changes at the time no longer holds.
---
 src/libcore/sync/atomic.rs  | 210 ++++++++++++++++++++++++++++++++++++
 src/libcore/tests/atomic.rs |  47 ++++++++
 src/libcore/tests/lib.rs    |   1 +
 3 files changed, 258 insertions(+)

diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 220f221cdd36d..dcf0c9315cdb0 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -1166,6 +1166,216 @@ impl<T> AtomicPtr<T> {
             }
         }
     }
+
+    /// Adds to the current pointer, returning the previous pointer.
+    ///
+    /// Unlike other pointer additions, `fetch_add` increments directly by the provided value,
+    /// rather than interpreting it as a multiple of `size_of`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0 as *mut ());
+    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_add(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Subtracts from the current pointer, returning the previous pointer.
+    ///
+    /// Unlike other pointer subtractions, `fetch_sub` decrements directly by the provided value,
+    /// rather than interpreting it as a multiple of `size_of`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(20 as *mut ());
+    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_sub(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "and" with the current value.
+    ///
+    /// Performs a bitwise "and" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_and(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "nand" with the current value.
+    ///
+    /// Performs a bitwise "nand" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0x13 as *mut ());
+    /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31) as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_nand(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_nand(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "or" with the current value.
+    ///
+    /// Performs a bitwise "or" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_or(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "xor" with the current value.
+    ///
+    /// Performs a bitwise "xor" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_xor(self.p.get() as *mut usize, val, order)) }
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "8")]
diff --git a/src/libcore/tests/atomic.rs b/src/libcore/tests/atomic.rs
index acbd913982c1f..e64a48559dd40 100644
--- a/src/libcore/tests/atomic.rs
+++ b/src/libcore/tests/atomic.rs
@@ -87,6 +87,53 @@ fn int_xor() {
     assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
 }
 
+#[test]
+fn atomic_ptr() {
+    // This test assumes a contiguous memory layout for a (tuple) pair of usize
+    unsafe {
+        let mut mem: (usize, usize) = (1, 2);
+        let mut ptr = &mut mem.0 as *mut usize;
+        // ptr points to .0
+        let atomic = AtomicPtr::new(ptr);
+        // atomic points to .0
+        assert_eq!(atomic.fetch_add(core::mem::size_of::<usize>(), SeqCst), ptr);
+        // atomic points to .1
+        ptr = atomic.load(SeqCst);
+        // ptr points to .1
+        assert_eq!(*ptr, 2);
+        atomic.fetch_sub(core::mem::size_of::<usize>(), SeqCst);
+        // atomic points to .0
+        ptr = atomic.load(SeqCst);
+        // ptr points to .0
+        assert_eq!(*ptr, 1);
+
+        // now try xor and back
+        assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), ptr);
+        // atomic is NULL
+        assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), std::ptr::null_mut());
+        // atomic points to .0
+        ptr = atomic.load(SeqCst);
+        // ptr points to .0
+        assert_eq!(*ptr, 1);
+
+        // then and with all 1s
+        assert_eq!(atomic.fetch_and(!0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), ptr);
+
+        // then or with all 0s
+        assert_eq!(atomic.fetch_or(0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), ptr);
+
+        // then or with all 1s
+        assert_eq!(atomic.fetch_or(!0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), !0 as *mut _);
+
+        // then and with all 0s
+        assert_eq!(atomic.fetch_and(0, SeqCst), !0 as *mut _);
+        assert_eq!(atomic.load(SeqCst), 0 as *mut _);
+    }
+}
+
 static S_FALSE: AtomicBool = AtomicBool::new(false);
 static S_TRUE: AtomicBool = AtomicBool::new(true);
 static S_INT: AtomicIsize = AtomicIsize::new(0);
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index 05f958cbe81fe..0452d28f74bd7 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -1,4 +1,5 @@
 #![feature(alloc_layout_extra)]
+#![feature(atomic_ptr_fetch_op)]
 #![feature(bool_to_option)]
 #![feature(bound_cloned)]
 #![feature(box_syntax)]
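
As a quick illustration of why the unscaled semantics matter, here is a small usage sketch (not part of the patch) that tags and untags the low bit of a pointer with the new `fetch_or`/`fetch_and` methods, something that previously required round-tripping through `AtomicUsize`. The `MARK` constant and the boxed-integer setup are invented for the example, and it assumes a nightly compiler with the `atomic_ptr_fetch_op` feature added above:

```rust
// Sketch only: low-bit pointer tagging with the AtomicPtr methods from this patch.
#![feature(atomic_ptr_fetch_op)]

use std::sync::atomic::{AtomicPtr, Ordering};

const MARK: usize = 0b1; // low bit used as a hypothetical "logically deleted" flag

fn main() {
    // A heap allocation of u64 is at least 8-byte aligned, so its low bit is free.
    let ptr = Box::into_raw(Box::new(42u64));
    let atomic = AtomicPtr::new(ptr);

    // Set the mark bit atomically; the previous value is the untagged pointer.
    let prev = atomic.fetch_or(MARK, Ordering::SeqCst);
    assert_eq!(prev, ptr);

    // Readers strip the tag before dereferencing.
    let tagged = atomic.load(Ordering::SeqCst);
    assert_eq!(tagged as usize & MARK, MARK);
    let untagged = (tagged as usize & !MARK) as *mut u64;
    assert_eq!(unsafe { *untagged }, 42);

    // Clear the mark again with fetch_and.
    atomic.fetch_and(!MARK, Ordering::SeqCst);
    assert_eq!(atomic.load(Ordering::SeqCst), ptr);

    // Reclaim the allocation.
    unsafe { drop(Box::from_raw(atomic.load(Ordering::SeqCst))) };
}
```

Because the low bit of a sufficiently aligned pointer is always zero, the tag can be set and cleared atomically without any scaling by `size_of::<T>()`, which is the behaviour the commit message argues for.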