 //! Note that this implementation does not take into account C++20's memory model revision to SC accesses
 //! and fences introduced by P0668 (<https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html>).
 //! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
-//! disallows.
+//! disallows (<https://github.com/rust-lang/miri/issues/2301>).
 //!
 //! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
 //! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
@@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{
 };
 use rustc_data_structures::fx::FxHashMap;
 
-use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx};
+use crate::{
+    AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
+};
 
 use super::{
-    data_race::{GlobalState, ThreadClockSet},
+    data_race::{GlobalState as DataRaceState, ThreadClockSet},
     range_object_map::{AccessType, RangeObjectMap},
 };
 
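As a point of reference for the weak memory behaviours described in the module docs above, here is a minimal store-buffering litmus test, a sketch rather than code from this change: with Relaxed orderings, both loads returning 0 is an outcome the C++20 model allows, and one this emulation can produce.

    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};
    use std::thread;

    static X: AtomicU32 = AtomicU32::new(0);
    static Y: AtomicU32 = AtomicU32::new(0);

    fn main() {
        // Store buffering (SB): with Relaxed, r1 == 0 && r2 == 0 is a
        // permitted weak outcome that native hardware can also exhibit.
        let t1 = thread::spawn(|| { X.store(1, Relaxed); Y.load(Relaxed) });
        let t2 = thread::spawn(|| { Y.store(1, Relaxed); X.load(Relaxed) });
        let (r1, r2) = (t1.join().unwrap(), t2.join().unwrap());
        println!("r1 = {r1}, r2 = {r2}");
    }

Because buffered_read below selects among buffered stores at random, repeated Miri runs (e.g. with different -Zmiri-seed values) may or may not surface the weak outcome on any given run.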
@@ -149,7 +151,7 @@ impl StoreBufferAlloc {
     /// before without data race, we can determine that the non-atomic access fully happens
     /// after all the prior atomic accesses so the location no longer needs to exhibit
     /// any weak memory behaviours until further atomic accesses.
-    pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) {
+    pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
         if !global.ongoing_action_data_race_free() {
             let mut buffers = self.store_buffers.borrow_mut();
             let access_type = buffers.access_type(range);
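The doc comment in this hunk can be made concrete with a small hedged sketch (illustrative only; join() supplies the race-free happens-before edge): once a non-atomic access happens after every atomic store to a location, later reads must see the latest value, so its store buffer can be discarded.

    use std::sync::Arc;
    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};
    use std::thread;

    fn main() {
        let x = Arc::new(AtomicU32::new(0));
        let x2 = Arc::clone(&x);
        let t = thread::spawn(move || {
            x2.store(1, Relaxed);
            x2.store(2, Relaxed);
        });
        t.join().unwrap(); // join() synchronizes-with the spawned thread
        // This non-atomic access is race-free and fully happens after all
        // atomic stores, so the location need not show weak behaviours:
        let mut x = Arc::try_unwrap(x).unwrap();
        assert_eq!(*x.get_mut(), 2);
    }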
@@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
     }
 
     /// Reads from the last store in modification order
-    fn read_from_last_store(&self, global: &GlobalState) {
+    fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) {
         let store_elem = self.buffer.back();
         if let Some(store_elem) = store_elem {
-            let (index, clocks) = global.current_thread_state();
+            let (index, clocks) = global.current_thread_state(thread_mgr);
             store_elem.load_impl(index, &clocks);
         }
     }
 
     fn buffered_read(
         &self,
-        global: &GlobalState,
+        global: &DataRaceState,
+        thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
         rng: &mut (impl rand::Rng + ?Sized),
         validate: impl FnOnce() -> InterpResult<'tcx>,
@@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         let store_elem = {
             // The `clocks` we got here must be dropped before calling validate_atomic_load
             // as the race detector will update it
-            let (.., clocks) = global.current_thread_state();
+            let (.., clocks) = global.current_thread_state(thread_mgr);
             // Load from a valid entry in the store buffer
             self.fetch_store(is_seqcst, &clocks, &mut *rng)
         };
@@ -268,18 +271,19 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         // requires access to ThreadClockSet.clock, which is updated by the race detector
         validate()?;
 
-        let (index, clocks) = global.current_thread_state();
+        let (index, clocks) = global.current_thread_state(thread_mgr);
         let loaded = store_elem.load_impl(index, &clocks);
         Ok(loaded)
     }
 
     fn buffered_write(
         &mut self,
         val: ScalarMaybeUninit<Tag>,
-        global: &GlobalState,
+        global: &DataRaceState,
+        thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
     ) -> InterpResult<'tcx> {
-        let (index, clocks) = global.current_thread_state();
+        let (index, clocks) = global.current_thread_state(thread_mgr);
 
         self.store_impl(val, index, &clocks.clock, is_seqcst);
         Ok(())
@@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         {
             let range = alloc_range(base_offset, place.layout.size);
             if alloc_buffers.is_overlapping(range)
-                && !alloc_clocks
-                    .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
+                && !alloc_clocks.race_free_with_atomic(
+                    range,
+                    this.machine.data_race.as_ref().unwrap(),
+                    &this.machine.threads,
+                )
             {
                 throw_unsup_format!(
                     "racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation"
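For illustration, an access of the kind this error message targets looks roughly like the following hypothetical sketch; the pointer cast is invented for the example, and the error only fires when the imperfectly overlapping access races with an atomic on another thread:

    use std::sync::atomic::{AtomicU16, AtomicU32, Ordering::Relaxed};

    fn main() {
        let x = AtomicU32::new(0);
        // Reinterpret one half of `x`'s storage as an AtomicU16. If another
        // thread performed a concurrent full-width atomic access to `x`,
        // the two atomics would imperfectly overlap, which Miri's weak
        // memory emulation rejects with the message above.
        let p = &x as *const AtomicU32 as *const AtomicU16;
        let half = unsafe { &*p }.load(Relaxed);
        println!("half = {half}");
    }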
@@ -443,41 +450,41 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &mut self,
         new_val: ScalarMaybeUninit<Tag>,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
         if let (
             crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::Evaluator { data_race: Some(global), .. },
+            crate::Evaluator { data_race: Some(global), threads, .. },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
-            if atomic == AtomicRwOp::SeqCst {
-                global.sc_read();
-                global.sc_write();
+            if atomic == AtomicRwOrd::SeqCst {
+                global.sc_read(threads);
+                global.sc_write(threads);
             }
             let range = alloc_range(base_offset, place.layout.size);
             let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?;
-            buffer.read_from_last_store(global);
-            buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
+            buffer.read_from_last_store(global, threads);
+            buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
         }
         Ok(())
     }
 
     fn buffered_atomic_read(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
         latest_in_mo: ScalarMaybeUninit<Tag>,
         validate: impl FnOnce() -> InterpResult<'tcx>,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
         if let Some(global) = &this.machine.data_race {
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
             if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
-                if atomic == AtomicReadOp::SeqCst {
-                    global.sc_read();
+                if atomic == AtomicReadOrd::SeqCst {
+                    global.sc_read(&this.machine.threads);
                 }
                 let mut rng = this.machine.rng.borrow_mut();
                 let buffer = alloc_buffers.get_or_create_store_buffer(
@@ -486,7 +493,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                 )?;
                 let loaded = buffer.buffered_read(
                     global,
-                    atomic == AtomicReadOp::SeqCst,
+                    &this.machine.threads,
+                    atomic == AtomicReadOrd::SeqCst,
                     &mut *rng,
                     validate,
                 )?;
@@ -504,18 +512,18 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &mut self,
         val: ScalarMaybeUninit<Tag>,
         dest: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicWriteOp,
+        atomic: AtomicWriteOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
         if let (
             crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::Evaluator { data_race: Some(global), .. },
+            crate::Evaluator { data_race: Some(global), threads, .. },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
-            if atomic == AtomicWriteOp::SeqCst {
-                global.sc_write();
+            if atomic == AtomicWriteOrd::SeqCst {
+                global.sc_write(threads);
             }
 
             // UGLY HACK: in write_scalar_atomic() we don't know the value before our write,
@@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                 buffer.buffer.pop_front();
             }
 
-            buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
+            buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?;
         }
 
         // Caller should've written to dest with the vanilla scalar write, we do nothing here
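By contrast with Relaxed accesses, SeqCst reads and writes participate in the single total order that the sc_read/sc_write bookkeeping above helps maintain. A small sketch of the guarantee this buys (illustrative, not part of this change):

    use std::sync::atomic::{AtomicU32, Ordering::SeqCst};
    use std::thread;

    static X: AtomicU32 = AtomicU32::new(0);
    static Y: AtomicU32 = AtomicU32::new(0);

    fn main() {
        let t1 = thread::spawn(|| { X.store(1, SeqCst); Y.load(SeqCst) });
        let t2 = thread::spawn(|| { Y.store(1, SeqCst); X.load(SeqCst) });
        let (r1, r2) = (t1.join().unwrap(), t2.join().unwrap());
        // With all four accesses SeqCst, at least one load must observe the
        // other thread's store: r1 == 0 && r2 == 0 is forbidden.
        assert!(r1 == 1 || r2 == 1);
    }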
@@ -548,21 +556,21 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
     fn perform_read_on_buffered_latest(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
 
         if let Some(global) = &this.machine.data_race {
-            if atomic == AtomicReadOp::SeqCst {
-                global.sc_read();
+            if atomic == AtomicReadOrd::SeqCst {
+                global.sc_read(&this.machine.threads);
             }
             let size = place.layout.size;
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
             if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
                 let buffer = alloc_buffers
                     .get_or_create_store_buffer(alloc_range(base_offset, size), init)?;
-                buffer.read_from_last_store(global);
+                buffer.read_from_last_store(global, &this.machine.threads);
             }
         }
         Ok(())