From fe9958f430ec1aec2ec8dbb36fc6db16c3493f8a Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Mon, 1 Aug 2022 00:14:04 +0900
Subject: [PATCH] Provide stable equivalent of
 #![feature(strict_provenance_atomic_ptr)]

- `AtomicPtr::fetch_ptr_{add,sub}`
- `AtomicPtr::fetch_byte_{add,sub}`
- `AtomicPtr::fetch_{or,and,xor}`

These APIs are compatible with strict provenance on `cfg(miri)`.
Otherwise, they are compatible with permissive provenance.
Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized, these
APIs will be strict-provenance compatible in all cases, starting with
the Rust version that stabilizes the feature.
---
 CHANGELOG.md        |   9 +
 Cargo.toml          |   1 +
 README.md           |   1 +
 build.rs            |   5 +
 src/lib.rs          | 409 +++++++++++++++++++++++++++++++++++++++++++-
 src/tests/helper.rs |  83 +++++++++
 6 files changed, 506 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7b9da781..14c4c8a5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,15 @@ Note: In this file, do not use the hard wrap in the middle of a sentence for com
 
 ## [Unreleased]
 
+- Provide stable equivalent of [`#![feature(strict_provenance_atomic_ptr)]`](https://github.com/rust-lang/rust/issues/99108).
+
+  - `AtomicPtr::fetch_ptr_{add,sub}`
+  - `AtomicPtr::fetch_byte_{add,sub}`
+  - `AtomicPtr::fetch_{or,and,xor}`
+
+  These APIs are compatible with strict provenance on `cfg(miri)`. Otherwise, they are compatible with permissive provenance.
+  Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized, these APIs will be strict-provenance compatible in all cases, starting with the Rust version that stabilizes the feature.
+
 ## [0.3.6] - 2022-07-26
 
 - Fix build failure due to the existence of the `specs` directory.

diff --git a/Cargo.toml b/Cargo.toml
index 9803223d..c5486148 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -67,4 +67,5 @@ paste = "1"
 quickcheck = { default-features = false, git = "https://github.com/taiki-e/quickcheck.git", branch = "dev" } # https://github.com/BurntSushi/quickcheck/pull/304 + https://github.com/BurntSushi/quickcheck/pull/282 + lower MSRV
 serde = { version = "1", features = ["derive"] }
 serde_test = "1"
+sptr = "0.3"
 static_assertions = "1"

diff --git a/README.md b/README.md
index 078234ce..a56ea848 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
 
 - Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
 - Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
+- Provide stable equivalents of the standard library atomic types' unstable APIs, such as [`AtomicPtr::fetch_*`](https://github.com/rust-lang/rust/issues/99108).
 - Make features that require newer compilers, such as [fetch_max](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicUsize.html#method.fetch_max), [fetch_min](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicUsize.html#method.fetch_min), [fetch_update](https://doc.rust-lang.org/nightly/std/sync/atomic/struct.AtomicPtr.html#method.fetch_update), and [stronger CAS failure ordering](https://github.com/rust-lang/rust/pull/98383) available on Rust 1.34+.
 
 ## 128-bit atomics support
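As a minimal usage sketch of what this patch enables on stable (the variable names and orderings below are chosen for illustration; `sptr::Strict` is the stable polyfill for `addr`/`map_addr` that the patch also uses in its doc examples):

```rust
use portable_atomic::{AtomicPtr, Ordering};
use sptr::Strict; // stable polyfill for strict provenance

fn main() {
    let mut data = 42i64;
    let atom = AtomicPtr::<i64>::new(&mut data);

    // Atomically set the low tag bit; `fetch_or` returns the previous pointer.
    let prev = atom.fetch_or(1, Ordering::AcqRel);
    assert_eq!(prev.addr() & 1, 0);

    // Load the tagged pointer and strip the tag before dereferencing.
    let tagged = atom.load(Ordering::Acquire);
    assert_eq!(tagged.addr() & 1, 1);
    let untagged = tagged.map_addr(|a| a & !1);
    assert_eq!(unsafe { *untagged }, 42);
}
```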
diff --git a/build.rs b/build.rs
index fd9d9c65..24252de2 100644
--- a/build.rs
+++ b/build.rs
@@ -112,6 +112,11 @@ fn main() {
     if version.nightly {
         println!("cargo:rustc-cfg=portable_atomic_nightly");
 
+        // https://github.com/rust-lang/rust/pull/96935 merged in Rust 1.64 (nightly-2022-07-07).
+        if version.probe(64, 2022, 7, 6) {
+            println!("cargo:rustc-cfg=portable_atomic_unstable_strict_provenance_atomic_ptr");
+        }
+
         // `cfg(sanitize = "..")` is not stabilized.
         let sanitize = std::env::var("CARGO_CFG_SANITIZE").unwrap_or_default();
         if sanitize.contains("thread") {

diff --git a/src/lib.rs b/src/lib.rs
index 39752537..62e56105 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,6 +7,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
 
 - Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
 - Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
+- Provide stable equivalents of the standard library atomic types' unstable APIs, such as [`AtomicPtr::fetch_*`](https://github.com/rust-lang/rust/issues/99108).
 - Make features that require newer compilers, such as [fetch_max](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicUsize.html#method.fetch_max), [fetch_min](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicUsize.html#method.fetch_min), [fetch_update](https://doc.rust-lang.org/nightly/std/sync/atomic/struct.AtomicPtr.html#method.fetch_update), and [stronger CAS failure ordering](https://github.com/rust-lang/rust/pull/98383) available on Rust 1.34+.
 
 ## 128-bit atomics support
 
@@ -166,6 +167,11 @@ See [this list](https://github.com/taiki-e/portable-atomic/issues/10#issuecommen
     all(any(target_arch = "avr", target_arch = "msp430"), portable_atomic_no_asm),
     feature(llvm_asm)
 )]
+// miri only
+#![cfg_attr(
+    all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr),
+    feature(strict_provenance_atomic_ptr)
+)]
 // docs only
 #![cfg_attr(docsrs, feature(doc_cfg))]
 
@@ -1242,8 +1248,407 @@ impl<T> AtomicPtr<T> {
         Err(prev)
     }
 
-    // TODO: add fetch_ptr_add, fetch_ptr_sub, fetch_byte_add, fetch_byte_sub, fetch_or, fetch_and, fetch_xor
-    // https://github.com/rust-lang/rust/pull/96935
+    /// Offsets the pointer's address by adding `val` (in units of `T`),
+    /// returning the previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_add`] to atomically perform the
+    /// equivalent of `ptr = ptr.wrapping_add(val);`.
+    ///
+    /// This method operates in units of `T`, which means that it cannot be used
+    /// to offset the pointer by an amount which is not a multiple of
+    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+    /// work with a deliberately misaligned pointer. In such cases, you may use
+    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
+    ///
+    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`wrapping_add`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
+    /// // Note: units of `size_of::<i64>()`.
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    }
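For intuition, the method above behaves like the following CAS loop (a sketch under the same semantics; the helper name is invented, and the real method is a single atomic read-modify-write rather than a loop):

```rust
use portable_atomic::{AtomicPtr, Ordering};

// Hypothetical illustration: atomically replace the stored pointer with
// `old.wrapping_add(val)` (units of `T`) and return the old pointer.
fn fetch_ptr_add_sketch<T>(atom: &AtomicPtr<T>, val: usize, order: Ordering) -> *mut T {
    let mut old = atom.load(Ordering::Relaxed);
    loop {
        let new = old.wrapping_add(val);
        match atom.compare_exchange_weak(old, new, order, Ordering::Relaxed) {
            Ok(prev) => return prev,
            Err(prev) => old = prev, // another thread won the race; retry
        }
    }
}
```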
+
+    /// Offsets the pointer's address by subtracting `val` (in units of `T`),
+    /// returning the previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_sub`] to atomically perform the
+    /// equivalent of `ptr = ptr.wrapping_sub(val);`.
+    ///
+    /// This method operates in units of `T`, which means that it cannot be used
+    /// to offset the pointer by an amount which is not a multiple of
+    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+    /// work with a deliberately misaligned pointer. In such cases, you may use
+    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
+    ///
+    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    ///
+    /// let array = [1i32, 2i32];
+    /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
+    ///
+    /// assert!(core::ptr::eq(atom.fetch_ptr_sub(1, Ordering::Relaxed), &array[1]));
+    /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    }
+
+    /// Offsets the pointer's address by adding `val` *bytes*, returning the
+    /// previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
+    /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+    ///
+    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`wrapping_add`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add
+    /// [`cast`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.cast
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
+    /// // Note: in units of bytes, not `size_of::<i64>()`.
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
+        // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance
+        // compatible, but it is unstable. So, for now, use it only on cfg(miri).
+        // Code using AtomicUsize::fetch_* via casts is still permissive-provenance
+        // compatible and is sound.
+        // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized,
+        // use AtomicPtr::fetch_* in all cases, starting with the version that stabilizes it.
+        #[cfg(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr))]
+        {
+            self.inner.fetch_byte_add(val, order)
+        }
+        #[cfg(not(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr)))]
+        {
+            self.as_atomic_usize().fetch_add(val, order) as *mut T
+        }
+    }
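The cfg split inside `fetch_byte_add` is the heart of this patch, so here is a standalone sketch of the two pointer-arithmetic styles it switches between (function names are illustrative only):

```rust
use sptr::Strict;

// Provenance-preserving: the result inherits `p`'s provenance. This is what
// the unstable `AtomicPtr::fetch_byte_add` guarantees and what Miri's strict
// mode can check.
fn add_byte_strict(p: *mut u8) -> *mut u8 {
    p.with_addr(p.addr().wrapping_add(1))
}

// Int-to-ptr round trip, the shape of the `as_atomic_usize` fallback: sound
// under the permissive ("exposed") provenance model that stable Rust
// guarantees today, but rejected by strict-provenance checking.
fn add_byte_permissive(p: *mut u8) -> *mut u8 {
    ((p as usize).wrapping_add(1)) as *mut u8
}
```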
+
+    /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
+    /// previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
+    /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+    ///
+    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub
+    /// [`cast`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.cast
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let atom = AtomicPtr::<i64>::new(sptr::invalid_mut(1));
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
+        // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance
+        // compatible, but it is unstable. So, for now, use it only on cfg(miri).
+        // Code using AtomicUsize::fetch_* via casts is still permissive-provenance
+        // compatible and is sound.
+        // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized,
+        // use AtomicPtr::fetch_* in all cases, starting with the version that stabilizes it.
+        #[cfg(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr))]
+        {
+            self.inner.fetch_byte_sub(val, order)
+        }
+        #[cfg(not(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr)))]
+        {
+            self.as_atomic_usize().fetch_sub(val, order) as *mut T
+        }
+    }
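One caveat recurs in the bit-operation docs below: these operations return the *previous* pointer, so computing the newly stored value without losing provenance means re-applying the operation to the return value via `map_addr`. A sketch with an invented helper name:

```rust
use portable_atomic::{AtomicPtr, Ordering};
use sptr::Strict;

// Returns the pointer that was *stored* by the RMW (rather than the previous
// one), while keeping the previous pointer's provenance.
fn or_and_get<T>(atom: &AtomicPtr<T>, val: usize) -> *mut T {
    atom.fetch_or(val, Ordering::AcqRel).map_addr(|a| a | val)
}
```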
+
+    /// Performs a bitwise "or" operation on the address of the current pointer,
+    /// and the argument `val`, and stores a pointer with provenance of the
+    /// current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
+    /// pointer schemes to atomically set tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
+    ///
+    /// `fetch_or` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`](https://doc.rust-lang.org/std/ptr/index.html)
+    /// for details.
+    ///
+    /// [`map_addr`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    ///
+    /// let atom = AtomicPtr::<i64>::new(pointer);
+    /// // Tag the bottom bit of the pointer.
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
+    /// // Extract and untag.
+    /// let tagged = atom.load(Ordering::Relaxed);
+    /// assert_eq!(tagged.addr() & 1, 1);
+    /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
+        // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance
+        // compatible, but it is unstable. So, for now, use it only on cfg(miri).
+        // Code using AtomicUsize::fetch_* via casts is still permissive-provenance
+        // compatible and is sound.
+        // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized,
+        // use AtomicPtr::fetch_* in all cases, starting with the version that stabilizes it.
+        #[cfg(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr))]
+        {
+            self.inner.fetch_or(val, order)
+        }
+        #[cfg(not(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr)))]
+        {
+            self.as_atomic_usize().fetch_or(val, order) as *mut T
+        }
+    }
+
+    /// Performs a bitwise "and" operation on the address of the current
+    /// pointer, and the argument `val`, and stores a pointer with provenance of
+    /// the current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
+    /// pointer schemes to atomically unset tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
+    ///
+    /// `fetch_and` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`](https://doc.rust-lang.org/std/ptr/index.html)
+    /// for details.
+    ///
+    /// [`map_addr`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    /// // A tagged pointer
+    /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
+    /// // Untag, and extract the previously tagged pointer.
+    /// let untagged = atom.fetch_and(!1, Ordering::Relaxed).map_addr(|a| a & !1);
+    /// assert_eq!(untagged, pointer);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
+        // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance
+        // compatible, but it is unstable. So, for now, use it only on cfg(miri).
+        // Code using AtomicUsize::fetch_* via casts is still permissive-provenance
+        // compatible and is sound.
+        // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized,
+        // use AtomicPtr::fetch_* in all cases, starting with the version that stabilizes it.
+        #[cfg(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr))]
+        {
+            self.inner.fetch_and(val, order)
+        }
+        #[cfg(not(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr)))]
+        {
+            self.as_atomic_usize().fetch_and(val, order) as *mut T
+        }
+    }
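Putting `fetch_or` and `fetch_and` together: a small hypothetical wrapper (the `DirtyPtr` name and its methods are invented for illustration, and it assumes `align_of::<T>() >= 2` so the low bit is free) that keeps a "dirty" flag in the pointer's low bit:

```rust
use portable_atomic::{AtomicPtr, Ordering};
use sptr::Strict;

/// Illustrative only: the low pointer bit doubles as a "dirty" flag.
struct DirtyPtr<T>(AtomicPtr<T>);

impl<T> DirtyPtr<T> {
    const DIRTY: usize = 0b1;

    /// Sets the flag; returns whether it was already set.
    fn set_dirty(&self) -> bool {
        self.0.fetch_or(Self::DIRTY, Ordering::AcqRel).addr() & Self::DIRTY != 0
    }

    /// Clears the flag; returns whether it was set.
    fn clear_dirty(&self) -> bool {
        self.0.fetch_and(!Self::DIRTY, Ordering::AcqRel).addr() & Self::DIRTY != 0
    }

    /// The pointer with the flag masked off.
    fn pointer(&self) -> *mut T {
        self.0.load(Ordering::Acquire).map_addr(|a| a & !Self::DIRTY)
    }
}
```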
+
+    /// Performs a bitwise "xor" operation on the address of the current
+    /// pointer, and the argument `val`, and stores a pointer with provenance of
+    /// the current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
+    /// pointer schemes to atomically toggle tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
+    ///
+    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`](https://doc.rust-lang.org/std/ptr/index.html)
+    /// for details.
+    ///
+    /// [`map_addr`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(unstable_name_collisions)]
+    /// use portable_atomic::{AtomicPtr, Ordering};
+    /// use sptr::Strict; // stable polyfill for strict provenance
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    /// let atom = AtomicPtr::<i64>::new(pointer);
+    ///
+    /// // Toggle a tag bit on the pointer.
+    /// atom.fetch_xor(1, Ordering::Relaxed);
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
+    /// ```
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
+        // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance
+        // compatible, but it is unstable. So, for now, use it only on cfg(miri).
+        // Code using AtomicUsize::fetch_* via casts is still permissive-provenance
+        // compatible and is sound.
+        // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized,
+        // use AtomicPtr::fetch_* in all cases, starting with the version that stabilizes it.
+        #[cfg(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr))]
+        {
+            self.inner.fetch_xor(val, order)
+        }
+        #[cfg(not(all(miri, portable_atomic_unstable_strict_provenance_atomic_ptr)))]
+        {
+            self.as_atomic_usize().fetch_xor(val, order) as *mut T
+        }
+    }
+
+    #[cfg(not(miri))]
+    #[inline]
+    #[cfg_attr(
+        portable_atomic_no_cfg_target_has_atomic,
+        cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core))
+    )]
+    #[cfg_attr(
+        not(portable_atomic_no_cfg_target_has_atomic),
+        cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core))
+    )]
+    fn as_atomic_usize(&self) -> &AtomicUsize {
+        let [] = [(); core::mem::size_of::<AtomicPtr<()>>() - core::mem::size_of::<AtomicUsize>()];
+        let [] =
+            [(); core::mem::align_of::<AtomicPtr<()>>() - core::mem::align_of::<AtomicUsize>()];
+        // SAFETY: AtomicPtr<T> and AtomicUsize have the same layout,
+        // and both access data in the same way.
+        unsafe { &*(self as *const AtomicPtr<T> as *const AtomicUsize) }
+    }
 }
 
 macro_rules! atomic_int {
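A note on the `let [] = [(); N]` lines in `as_atomic_usize` above: they are compile-time layout assertions that work even on the old toolchains this crate supports, and they use the concrete `AtomicPtr<()>` because a generic parameter in an array length does not compile on stable. The trick in isolation:

```rust
fn layout_check() {
    // `[]` is an irrefutable pattern only for a zero-length array, so this
    // line compiles only when the two sizes are equal; if the first were
    // smaller, the subtraction would underflow and also fail at compile time.
    let [] = [(); core::mem::size_of::<*mut ()>() - core::mem::size_of::<usize>()];
}
```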
diff --git a/src/tests/helper.rs b/src/tests/helper.rs
index 3ad06831..d1b8c48e 100644
--- a/src/tests/helper.rs
+++ b/src/tests/helper.rs
@@ -1155,6 +1155,7 @@ macro_rules! __test_atomic_bool_pub {
 macro_rules! __test_atomic_ptr_pub {
     ($atomic_type:ty) => {
         __test_atomic_pub_common!($atomic_type, *mut u8);
+        use sptr::Strict;
         #[test]
         fn fetch_update() {
             let a = <$atomic_type>::new(ptr::null_mut());
@@ -1177,6 +1178,87 @@ macro_rules! __test_atomic_ptr_pub {
             assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
             assert_eq!(std::format!("{:p}", a), std::format!("{:p}", a.load(Ordering::SeqCst)));
         }
+        // https://github.com/rust-lang/rust/blob/76822a28780a9a93be04409e52c5df21663aab97/library/core/tests/atomic.rs#L130-L213
+        #[test]
+        fn ptr_add_null() {
+            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst).addr(), 0);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 8);
+
+            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst).addr(), 8);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 9);
+
+            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst).addr(), 9);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 1);
+
+            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst).addr(), 1);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0);
+        }
+        #[test]
+        fn ptr_add_data() {
+            let num = 0i64;
+            let n = &num as *const i64 as *mut _;
+            let atom = AtomicPtr::<i64>::new(n);
+            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst), n);
+            assert_eq!(atom.load(Ordering::SeqCst), n.wrapping_add(1));
+
+            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst), n.wrapping_add(1));
+            assert_eq!(atom.load(Ordering::SeqCst), n);
+            let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+
+            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst), n);
+            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(1));
+
+            assert_eq!(atom.fetch_byte_add(5, Ordering::SeqCst), bytes_from_n(1));
+            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(6));
+
+            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst), bytes_from_n(6));
+            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(5));
+
+            assert_eq!(atom.fetch_byte_sub(5, Ordering::SeqCst), bytes_from_n(5));
+            assert_eq!(atom.load(Ordering::SeqCst), n);
+        }
+        #[test]
+        fn ptr_bitops() {
+            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst).addr(), 0);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0111);
+
+            assert_eq!(atom.fetch_and(0b1101, Ordering::SeqCst).addr(), 0b0111);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0101);
+
+            assert_eq!(atom.fetch_xor(0b1111, Ordering::SeqCst).addr(), 0b0101);
+            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b1010);
+        }
+        #[test]
+        fn ptr_bitops_tagging() {
+            const MASK_TAG: usize = 0b1111;
+            const MASK_PTR: usize = !MASK_TAG;
+
+            #[repr(align(16))]
+            struct Tagme(u128);
+
+            let tagme = Tagme(1000);
+            let ptr = &tagme as *const Tagme as *mut Tagme;
+            let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);
+
+            assert_eq!(ptr.addr() & MASK_TAG, 0);
+
+            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst), ptr);
+            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b111));
+
+            assert_eq!(
+                atom.fetch_and(MASK_PTR | 0b0010, Ordering::SeqCst),
+                ptr.map_addr(|a| a | 0b111)
+            );
+            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));
+
+            assert_eq!(atom.fetch_xor(0b1011, Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));
+            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));
+
+            assert_eq!(atom.fetch_and(MASK_PTR, Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));
+            assert_eq!(atom.load(Ordering::SeqCst), ptr);
+        }
     };
 }
 
@@ -1317,6 +1399,7 @@ macro_rules! test_atomic_bool_pub {
 macro_rules! test_atomic_ptr_pub {
     () => {
         #[allow(clippy::undocumented_unsafe_blocks)]
+        #[allow(unstable_name_collisions)]
         mod test_atomic_bool_ptr {
             use super::*;
             __test_atomic_ptr_load_store!(AtomicPtr<u8>);
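Finally, for context on the `#[allow(unstable_name_collisions)]` added above: on nightly, raw pointers already have unstable inherent `addr`/`map_addr` methods, so the identically named `sptr::Strict` trait methods trigger that lint. A minimal reproduction:

```rust
#![allow(unstable_name_collisions)] // silence the nightly-only warning
use sptr::Strict;

fn main() {
    let p: *mut u8 = core::ptr::null_mut();
    // On stable this resolves to `sptr::Strict::addr`; on nightly an unstable
    // inherent `<*mut u8>::addr` would take precedence, hence the lint.
    assert_eq!(p.addr(), 0);
}
```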