
Commit 6df66c1

auto merge of #7109 : bblum/rust/rwlocks, r=brson
r? @brson

Links to issues: #7065, the race that's fixed; #7066, the perf improvement I added. There are also some minor cleanup commits here.

To measure the performance improvement from replacing the exclusive with an atomic uint, I edited the `msgsend-ring-rw-arcs` bench test to do a `write_downgrade` instead of just a `write`, so that it stressed the code paths that access `read_count`. (At first I was still using `write` and saw no performance difference whatsoever, whoops.) The bench test measures how long it takes to send 1,000,000 messages by using rwarcs to emulate pipes.

I also measured the cost imposed by the fix to the `access_lock` race, which involves taking an extra semaphore in the `cond.wait()` path. The net result: fixing the race imposes a 4% to 5% slowdown, while the atomic-uint optimization gives a 6% to 8% speedup. Note that the speedup will be most visible in read- or downgrade-heavy workloads; if an RWARC's only users are writers, the optimization doesn't matter. All the same, I think this more than justifies the extra complexity I mentioned in #7066.

The raw numbers:

```
with xadd read count:
    before write_cond fix:  4.18 to 4.26 us/message
    with write_cond fix:    4.35 to 4.39 us/message

with exclusive read count:
    before write_cond fix:  4.41 to 4.47 us/message
    with write_cond fix:    4.65 to 4.76 us/message
```
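For context on the optimization itself: the `read_count` bookkeeping moves from a count guarded by an exclusive lock to a bare atomic that readers bump with a single `xadd`. A minimal sketch of the two schemes in modern Rust (illustrative names, not the actual `sync.rs` fields):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Old scheme (sketch): every reader round-trips through a mutex
// just to adjust the shared read count.
struct ExclusiveReadCount {
    count: Mutex<usize>,
}

impl ExclusiveReadCount {
    // Returns true if this is the first reader in, i.e. the caller
    // must now acquire the writer-exclusion (access) lock.
    fn reader_enter(&self) -> bool {
        let mut c = self.count.lock().unwrap();
        *c += 1;
        *c == 1
    }
    // Returns true if this is the last reader out, i.e. the caller
    // must now release the access lock for waiting writers.
    fn reader_exit(&self) -> bool {
        let mut c = self.count.lock().unwrap();
        *c -= 1;
        *c == 0
    }
}

// New scheme (sketch): one atomic fetch_add/fetch_sub ("xadd") on the
// hot path; no extra lock acquisition per reader.
struct AtomicReadCount {
    count: AtomicUsize,
}

impl AtomicReadCount {
    fn reader_enter(&self) -> bool {
        self.count.fetch_add(1, Ordering::Acquire) == 0 // was 0: first in
    }
    fn reader_exit(&self) -> bool {
        self.count.fetch_sub(1, Ordering::Release) == 1 // was 1: last out
    }
}
```

Read- and downgrade-heavy workloads hit `reader_enter`/`reader_exit` constantly, which is why the speedup shows up there and not in writer-only workloads.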
2 parents: da42e6b + 2ef8774

4 files changed: +229 −90

src/libextra/arc.rs (+67 −7)
```diff
@@ -281,7 +281,6 @@ struct RWARCInner<T> { lock: RWlock, failed: bool, data: T }
 #[mutable]
 struct RWARC<T> {
     x: UnsafeAtomicRcBox<RWARCInner<T>>,
-    cant_nest: ()
 }
 
 /// Create a reader/writer ARC with the supplied data.
```
```diff
@@ -299,15 +298,14 @@ pub fn rw_arc_with_condvars<T:Const + Owned>(
     let data =
         RWARCInner { lock: rwlock_with_condvars(num_condvars),
                      failed: false, data: user_data };
-    RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () }
+    RWARC { x: UnsafeAtomicRcBox::new(data), }
 }
 
 impl<T:Const + Owned> RWARC<T> {
     /// Duplicate a rwlock-protected ARC, as arc::clone.
     pub fn clone(&self) -> RWARC<T> {
         RWARC {
             x: self.x.clone(),
-            cant_nest: (),
         }
     }
 
```
```diff
@@ -382,12 +380,12 @@ impl<T:Const + Owned> RWARC<T> {
      * # Example
      *
      * ~~~ {.rust}
-     * do arc.write_downgrade |write_mode| {
-     *     do (&write_mode).write_cond |state, condvar| {
+     * do arc.write_downgrade |mut write_token| {
+     *     do write_token.write_cond |state, condvar| {
      *         ... exclusive access with mutable state ...
      *     }
-     *     let read_mode = arc.downgrade(write_mode);
-     *     do (&read_mode).read |state| {
+     *     let read_token = arc.downgrade(write_token);
+     *     do read_token.read |state| {
      *         ... shared access with immutable state ...
      *     }
      * }
```
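One thing the new doc example highlights: `downgrade` takes the write token by value, so after `arc.downgrade(write_token)` the write token has been moved and can't be used again. A tiny illustrative sketch of that consuming-token pattern (hypothetical types, not the real `sync.rs` ones):

```rust
// Hypothetical stand-ins for the real write/read tokens.
struct WriteToken;
struct ReadToken;

// Taking `WriteToken` by value means downgrading consumes it;
// a real implementation would also hand the lock off atomically.
fn downgrade(_w: WriteToken) -> ReadToken {
    ReadToken
}

fn main() {
    let w = WriteToken;
    let _r = downgrade(w);
    // downgrade(w); // error[E0382]: use of moved value: `w`
}
```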
```diff
@@ -815,4 +813,66 @@ mod tests {
 
         wp2.recv(); // complete handshake with writer
     }
+    #[cfg(test)]
+    fn test_rw_write_cond_downgrade_read_race_helper() {
+        // Tests that when a downgrader hands off the "reader cloud" lock
+        // because of a contending reader, a writer can't race to get it
+        // instead, which would result in readers_and_writers. This tests
+        // the sync module rather than this one, but it's here because an
+        // rwarc gives us extra shared state to help check for the race.
+        // If you want to see this test fail, go to sync.rs and replace the
+        // line in RWlock::write_cond() that looks like:
+        //     "blk(&Condvar { order: opt_lock, ..*cond })"
+        // with just "blk(cond)".
+        let x = ~RWARC(true);
+        let (wp, wc) = comm::stream();
+
+        // writer task
+        let xw = (*x).clone();
+        do task::spawn {
+            do xw.write_cond |state, c| {
+                wc.send(()); // tell downgrader it's ok to go
+                c.wait();
+                // The core of the test is here: the condvar reacquire path
+                // must involve order_lock, so that it cannot race with a reader
+                // trying to receive the "reader cloud lock hand-off".
+                *state = false;
+            }
+        }
+
+        wp.recv(); // wait for writer to get in
+
+        do x.write_downgrade |mut write_mode| {
+            do write_mode.write_cond |state, c| {
+                assert!(*state);
+                // make writer contend in the cond-reacquire path
+                c.signal();
+            }
+            // make a reader task to trigger the "reader cloud lock" handoff
+            let xr = (*x).clone();
+            let (rp, rc) = comm::stream();
+            do task::spawn {
+                rc.send(());
+                do xr.read |_state| { }
+            }
+            rp.recv(); // wait for reader task to exist
+
+            let read_mode = x.downgrade(write_mode);
+            do read_mode.read |state| {
+                // if writer mistakenly got in, make sure it mutates state
+                // before we assert on it
+                for 5.times { task::yield(); }
+                // make sure writer didn't get in.
+                assert!(*state);
+            }
+        }
+    }
+    #[test]
+    fn test_rw_write_cond_downgrade_read_race() {
+        // Ideally the above test case would have yield statements in it that
+        // helped to expose the race nearly 100% of the time... but adding
+        // yields in the intuitively-right locations made it even less likely,
+        // and I wasn't sure why :( . This is a mediocre "next best" option.
+        for 8.times { test_rw_write_cond_downgrade_read_race_helper() }
+    }
 }
```
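For readers coming from modern Rust: the property this test defends (no writer may slip in between giving up write access and acquiring read access during a downgrade) is the same guarantee today's lock APIs make atomic. A rough standalone analogue, sketched with the third-party `parking_lot` crate's `RwLockWriteGuard::downgrade` (an assumed dependency; this illustrates the invariant, not the old `sync.rs` machinery):

```rust
use std::sync::Arc;
use std::thread;

use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let x = Arc::new(RwLock::new(true));

    // Hold exclusive access first, like the write_downgrade block above.
    let w = x.write();

    // A contending writer, like the writer task in the test.
    let xw = Arc::clone(&x);
    let writer = thread::spawn(move || {
        *xw.write() = false; // must block until the downgraded reader is done
    });

    // The downgrade is atomic: the contending writer cannot slip in
    // between "stop writing" and "start reading".
    let r = RwLockWriteGuard::downgrade(w);
    assert!(*r); // the writer can't have mutated the state yet

    drop(r); // now the writer may proceed
    writer.join().unwrap();
    assert!(!*x.read());
}
```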
