Commit 8e72286
Add missing atomic operations to AtomicPtr
This adds various `fetch_` methods to `AtomicPtr` that are present on the other `Atomic*` types, so that libraries that depend on atomic operations on pointers no longer have to cast those pointers to `usize` and fiddle around with `AtomicUsize` instead.

Note that this patch currently implements `fetch_add` and `fetch_sub` without considering the size of the pointer target, unlike regular pointer addition and subtraction. The rationale is that with atomic operations the user may indeed wish to truly increment by 1, which is difficult if every delta is interpreted as a multiple of the type's size.

This patch effectively resurrects the change from #10154. Based on #12949 (comment), the rationale for not making the change at the time no longer holds.
1 parent dbc3cfd commit 8e72286
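For context, here is a minimal sketch of the workaround the commit message alludes to: without these methods, a crate has to park the pointer in an `AtomicUsize` and cast on every access. This runs on stable; the `i32` payload and variable names are illustrative only.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Store the pointer as an integer, losing its type.
    let p = Box::into_raw(Box::new(42i32));
    let atomic = AtomicUsize::new(p as usize);

    // An "atomic pointer add" becomes a fetch_add on the integer...
    let old = atomic.fetch_add(1, Ordering::SeqCst) as *mut i32;
    assert_eq!(old, p);

    // ...and every load needs a cast back to the pointer type.
    let bumped = atomic.load(Ordering::SeqCst) as *mut i32;
    assert_eq!(bumped as usize, p as usize + 1);

    // Free the original allocation.
    unsafe { drop(Box::from_raw(p)) };
}
```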

File tree

3 files changed: +258 -0 lines changed

src/libcore/sync/atomic.rs

+210
````diff
@@ -1166,6 +1166,216 @@ impl<T> AtomicPtr<T> {
             }
         }
     }
+
+    /// Adds to the current pointer, returning the previous pointer.
+    ///
+    /// Unlike other pointer additions, `fetch_add` increments directly by the provided value,
+    /// rather than interpreting it as a multiple of `size_of::<T>()`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0 as *mut ());
+    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_add(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Subtracts from the current pointer, returning the previous pointer.
+    ///
+    /// Unlike other pointer subtractions, `fetch_sub` decrements directly by the provided value,
+    /// rather than interpreting it as a multiple of `size_of::<T>()`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(20 as *mut ());
+    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_sub(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "and" with the current value.
+    ///
+    /// Performs a bitwise "and" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_and(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "nand" with the current value.
+    ///
+    /// Performs a bitwise "nand" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0x13 as *mut ());
+    /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31) as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_nand(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_nand(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "or" with the current value.
+    ///
+    /// Performs a bitwise "or" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_or(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Bitwise "xor" with the current value.
+    ///
+    /// Performs a bitwise "xor" operation on the current pointer and the argument `val`, and
+    /// sets the new pointer to the result.
+    ///
+    /// Returns the previous pointer.
+    ///
+    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0b101101 as *mut ());
+    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_xor(self.p.get() as *mut usize, val, order)) }
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "8")]
````
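One common motivation for bitwise atomics on pointers is tag bits stashed in the low, alignment-guaranteed-zero bits of a pointer. Below is a minimal sketch of that pattern using the methods added above; it compiles only on a nightly toolchain with this patch applied, and the `MARK` constant and tagging scheme are illustrative, not part of this patch:

```rust
#![feature(atomic_ptr_fetch_op)]
use std::sync::atomic::{AtomicPtr, Ordering};

// Illustrative only: a one-bit "mark" in the pointer's lowest bit, which is
// guaranteed zero for any pointer aligned to at least 2 bytes.
const MARK: usize = 0b1;

fn main() {
    let node = Box::into_raw(Box::new(0u64)); // u64 alignment >= 2, low bit free
    let slot = AtomicPtr::new(node);

    // Atomically set the mark bit; the previous (unmarked) pointer is returned.
    let prev = slot.fetch_or(MARK, Ordering::SeqCst);
    assert_eq!(prev, node);

    // Strip the mark bit to recover the real pointer before using it.
    let tagged = slot.load(Ordering::SeqCst);
    let untagged = (tagged as usize & !MARK) as *mut u64;
    assert_eq!(untagged, node);

    // Free the allocation through the untagged pointer.
    unsafe { drop(Box::from_raw(node)) };
}
```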

src/libcore/tests/atomic.rs

+47
````diff
@@ -87,6 +87,53 @@ fn int_xor() {
     assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
 }
 
+#[test]
+fn atomic_ptr() {
+    // This test assumes a contiguous memory layout for a (tuple) pair of usize
+    unsafe {
+        let mut mem: (usize, usize) = (1, 2);
+        let mut ptr = &mut mem.0 as *mut usize;
+        // ptr points to .0
+        let atomic = AtomicPtr::new(ptr);
+        // atomic points to .0
+        assert_eq!(atomic.fetch_add(core::mem::size_of::<usize>(), SeqCst), ptr);
+        // atomic points to .1
+        ptr = atomic.load(SeqCst);
+        // ptr points to .1
+        assert_eq!(*ptr, 2);
+        atomic.fetch_sub(core::mem::size_of::<usize>(), SeqCst);
+        // atomic points to .0
+        ptr = atomic.load(SeqCst);
+        // ptr points to .0
+        assert_eq!(*ptr, 1);
+
+        // now try xor and back
+        assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), ptr);
+        // atomic is NULL
+        assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), std::ptr::null_mut());
+        // atomic points to .0
+        ptr = atomic.load(SeqCst);
+        // ptr points to .0
+        assert_eq!(*ptr, 1);
+
+        // then and with all 1s
+        assert_eq!(atomic.fetch_and(!0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), ptr);
+
+        // then or with all 0s
+        assert_eq!(atomic.fetch_or(0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), ptr);
+
+        // then or with all 1s
+        assert_eq!(atomic.fetch_or(!0, SeqCst), ptr);
+        assert_eq!(atomic.load(SeqCst), !0 as *mut _);
+
+        // then and with all 0s
+        assert_eq!(atomic.fetch_and(0, SeqCst), !0 as *mut _);
+        assert_eq!(atomic.load(SeqCst), 0 as *mut _);
+    }
+}
+
 static S_FALSE: AtomicBool = AtomicBool::new(false);
 static S_TRUE: AtomicBool = AtomicBool::new(true);
 static S_INT: AtomicIsize = AtomicIsize::new(0);
````
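The test above also illustrates the sizing decision from the commit message: since `fetch_add` counts raw bytes, stepping by whole elements means multiplying by the element size yourself. A standalone sketch of the same idea (nightly-only; `buf` and `cursor` are hypothetical names):

```rust
#![feature(atomic_ptr_fetch_op)]
use std::mem::size_of;
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // A cursor stepping through a buffer one element at a time.
    let mut buf = [10u32, 20, 30];
    let cursor = AtomicPtr::new(buf.as_mut_ptr());

    // fetch_add takes raw bytes, so element-wise stepping is explicit.
    cursor.fetch_add(size_of::<u32>(), Ordering::SeqCst);
    unsafe { assert_eq!(*cursor.load(Ordering::SeqCst), 20) };
}
```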

src/libcore/tests/lib.rs

+1
````diff
@@ -1,4 +1,5 @@
 #![feature(alloc_layout_extra)]
+#![feature(atomic_ptr_fetch_op)]
 #![feature(bool_to_option)]
 #![feature(bound_cloned)]
 #![feature(box_syntax)]
````
