@@ -1166,6 +1166,216 @@ impl<T> AtomicPtr<T> {
}
}
}
+
+    /// Adds to the current pointer, returning the previous pointer.
+    ///
+    /// Unlike regular pointer addition, `fetch_add` increments the pointer directly by the
+    /// provided value, rather than by a multiple of `size_of::<T>()`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(0 as *mut ());
+    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_add(self.p.get() as *mut usize, val, order)) }
+    }
+
+    /// Subtracts from the current pointer, returning the previous pointer.
+    ///
+    /// Unlike regular pointer subtraction, `fetch_sub` decrements the pointer directly by the
+    /// provided value, rather than by a multiple of `size_of::<T>()`.
+    ///
+    /// This operation wraps around on overflow.
+    ///
+    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
+    /// of this operation. All ordering modes are possible. Note that using
+    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+    /// using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// [`Ordering`]: enum.Ordering.html
+    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+    /// [`Release`]: enum.Ordering.html#variant.Release
+    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(atomic_ptr_fetch_op)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let foo = AtomicPtr::new(20 as *mut ());
+    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20 as *mut _);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
+    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe { crate::mem::transmute(atomic_sub(self.p.get() as *mut usize, val, order)) }
+    }
+
+ /// Bitwise "and" with the current value.
1241
+ ///
1242
+ /// Performs a bitwise "and" operation on the current pointer and the argument `val`, and
1243
+ /// sets the new pointer to the result.
1244
+ ///
1245
+ /// Returns the previous pointer.
1246
+ ///
1247
+ /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1248
+ /// of this operation. All ordering modes are possible. Note that using
1249
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1250
+ /// using [`Release`] makes the load part [`Relaxed`].
1251
+ ///
1252
+ /// [`Ordering`]: enum.Ordering.html
1253
+ /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1254
+ /// [`Release`]: enum.Ordering.html#variant.Release
1255
+ /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1256
+ ///
1257
+ /// # Examples
1258
+ ///
1259
+ /// ```
1260
+ /// #![feature(atomic_ptr_fetch_op)]
1261
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
1262
+ ///
1263
+ /// let foo = AtomicPtr::new(0b101101 as *mut ());
1264
+ /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
1265
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001 as *mut _);
1266
+ /// ```
1267
+ #[ inline]
1268
+ #[ cfg( target_has_atomic = "ptr" ) ]
1269
+ #[ unstable( feature = "atomic_ptr_fetch_op" , issue = "none" ) ]
1270
+ pub fn fetch_and ( & self , val : usize , order : Ordering ) -> * mut T {
1271
+ // SAFETY: data races are prevented by atomic intrinsics.
1272
+ unsafe { crate :: mem:: transmute ( atomic_and ( self . p . get ( ) as * mut usize , val, order) ) }
1273
+ }
1274
+
1275
+ /// Bitwise "nand" with the current value.
1276
+ ///
1277
+ /// Performs a bitwise "nand" operation on the current pointer and the argument `val`, and
1278
+ /// sets the new pointer to the result.
1279
+ ///
1280
+ /// Returns the previous pointer.
1281
+ ///
1282
+ /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1283
+ /// of this operation. All ordering modes are possible. Note that using
1284
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1285
+ /// using [`Release`] makes the load part [`Relaxed`].
1286
+ ///
1287
+ /// [`Ordering`]: enum.Ordering.html
1288
+ /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1289
+ /// [`Release`]: enum.Ordering.html#variant.Release
1290
+ /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1291
+ ///
1292
+ /// # Examples
1293
+ ///
1294
+ /// ```
1295
+ /// #![feature(atomic_ptr_fetch_op)]
1296
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
1297
+ ///
1298
+ /// let foo = AtomicPtr::new(0x13 as *mut ());
1299
+ /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13 as *mut _);
1300
+ /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31) as *mut _);
1301
+ /// ```
1302
+ #[ inline]
1303
+ #[ cfg( target_has_atomic = "ptr" ) ]
1304
+ #[ unstable( feature = "atomic_ptr_fetch_op" , issue = "none" ) ]
1305
+ pub fn fetch_nand ( & self , val : usize , order : Ordering ) -> * mut T {
1306
+ // SAFETY: data races are prevented by atomic intrinsics.
1307
+ unsafe { crate :: mem:: transmute ( atomic_nand ( self . p . get ( ) as * mut usize , val, order) ) }
1308
+ }
1309
+
1310
+ /// Bitwise "or" with the current value.
1311
+ ///
1312
+ /// Performs a bitwise "or" operation on the current pointer and the argument `val`, and
1313
+ /// sets the new pointer to the result.
1314
+ ///
1315
+ /// Returns the previous pointer.
1316
+ ///
1317
+ /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1318
+ /// of this operation. All ordering modes are possible. Note that using
1319
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1320
+ /// using [`Release`] makes the load part [`Relaxed`].
1321
+ ///
1322
+ /// [`Ordering`]: enum.Ordering.html
1323
+ /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1324
+ /// [`Release`]: enum.Ordering.html#variant.Release
1325
+ /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1326
+ ///
1327
+ /// # Examples
1328
+ ///
1329
+ /// ```
1330
+ /// #![feature(atomic_ptr_fetch_op)]
1331
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
1332
+ ///
1333
+ /// let foo = AtomicPtr::new(0b101101 as *mut ());
1334
+ /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
1335
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111 as *mut _);
1336
+ /// ```
1337
+ #[ inline]
1338
+ #[ cfg( target_has_atomic = "ptr" ) ]
1339
+ #[ unstable( feature = "atomic_ptr_fetch_op" , issue = "none" ) ]
1340
+ pub fn fetch_or ( & self , val : usize , order : Ordering ) -> * mut T {
1341
+ // SAFETY: data races are prevented by atomic intrinsics.
1342
+ unsafe { crate :: mem:: transmute ( atomic_or ( self . p . get ( ) as * mut usize , val, order) ) }
1343
+ }
1344
+
1345
+ /// Bitwise "xor" with the current value.
1346
+ ///
1347
+ /// Performs a bitwise "xor" operation on the current pointer and the argument `val`, and
1348
+ /// sets the new pointer to the result.
1349
+ ///
1350
+ /// Returns the previous pointer.
1351
+ ///
1352
+ /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1353
+ /// of this operation. All ordering modes are possible. Note that using
1354
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1355
+ /// using [`Release`] makes the load part [`Relaxed`].
1356
+ ///
1357
+ /// [`Ordering`]: enum.Ordering.html
1358
+ /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1359
+ /// [`Release`]: enum.Ordering.html#variant.Release
1360
+ /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1361
+ ///
1362
+ /// # Examples
1363
+ ///
1364
+ /// ```
1365
+ /// #![feature(atomic_ptr_fetch_op)]
1366
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
1367
+ ///
1368
+ /// let foo = AtomicPtr::new(0b101101 as *mut ());
1369
+ /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
1370
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110 as *mut _);
1371
+ /// ```
1372
+ #[ inline]
1373
+ #[ cfg( target_has_atomic = "ptr" ) ]
1374
+ #[ unstable( feature = "atomic_ptr_fetch_op" , issue = "none" ) ]
1375
+ pub fn fetch_xor ( & self , val : usize , order : Ordering ) -> * mut T {
1376
+ // SAFETY: data races are prevented by atomic intrinsics.
1377
+ unsafe { crate :: mem:: transmute ( atomic_xor ( self . p . get ( ) as * mut usize , val, order) ) }
1378
+ }
}

#[cfg(target_has_atomic_load_store = "8")]
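
As a usage sketch outside this diff: with the bitwise fetch operations added above (gated on the same `atomic_ptr_fetch_op` feature), tag bits in an aligned pointer can be set and cleared atomically. The `Box` allocation and the low-bit mask below are purely illustrative.

```rust
#![feature(atomic_ptr_fetch_op)]
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // A Box<u64> is at least 8-byte aligned, so its low bit is free to use as a tag.
    let data = Box::into_raw(Box::new(42u64));
    let slot = AtomicPtr::new(data);

    // Atomically set the tag bit; the previous (untagged) pointer is returned.
    assert_eq!(slot.fetch_or(0b1, Ordering::SeqCst), data);

    // Atomically clear the tag bit again with a mask.
    slot.fetch_and(!0b1, Ordering::SeqCst);
    assert_eq!(slot.load(Ordering::SeqCst), data);

    // SAFETY: the pointer is untagged again and still owns the allocation.
    unsafe { drop(Box::from_raw(slot.load(Ordering::SeqCst))) };
}
```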