
Commit bc7cee7

Move atomically to unstable::sync, and document what it actually does. Close #7872.
1 parent 2e6dc16 commit bc7cee7
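
For callers inside libstd the move is mechanical: import atomically from unstable::sync instead of reaching through the task module. Schematically (the dynamic_lib.rs and unstable/mod.rs hunks below are exactly this change):

    // Before this commit (paths as seen from inside libstd):
    use task;
    unsafe { do task::atomically { /* non-descheduling critical work */ } }

    // After this commit:
    use unstable::sync::atomically;
    unsafe { do atomically { /* non-descheduling critical work */ } }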

5 files changed, +57 -62 lines

src/libstd/rt/kill.rs

+1 -1

@@ -84,7 +84,7 @@ pub struct Death {
     on_exit: Option<~fn(bool)>,
     // nesting level counter for task::unkillable calls (0 == killable).
     unkillable: int,
-    // nesting level counter for task::atomically calls (0 == can yield).
+    // nesting level counter for unstable::atomically calls (0 == can yield).
     wont_sleep: int,
     // A "spare" handle to the kill flag inside the kill handle. Used during
     // blocking/waking as an optimization to avoid two xadds on the refcount.
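
The wont_sleep field this comment sits next to is just a nesting counter; the inhibit_yield/allow_yield pair that atomically uses in the TaskContext case (see the unstable/sync.rs hunk below) presumably bumps it and checks balance. A sketch of what those bodies are assumed to look like, not part of this diff:

    impl Death {
        // Enter an atomically block: one more nesting level that must not sleep.
        pub fn inhibit_yield(&mut self) {
            self.wont_sleep += 1;
        }
        // Leave the block; must pair with a prior inhibit_yield.
        pub fn allow_yield(&mut self) {
            assert!(self.wont_sleep > 0);
            self.wont_sleep -= 1;
        }
    }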

src/libstd/task/mod.rs

-53

@@ -655,44 +655,6 @@ pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
     }
 }
 
-/**
- * A stronger version of unkillable that also inhibits scheduling operations.
- * For use with exclusive Arcs, which use pthread mutexes directly.
- */
-pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
-    use rt::task::Task;
-
-    match context() {
-        OldTaskContext => {
-            let t = rt::rust_get_task();
-            do (|| {
-                rt::rust_task_inhibit_kill(t);
-                rt::rust_task_inhibit_yield(t);
-                f()
-            }).finally {
-                rt::rust_task_allow_yield(t);
-                rt::rust_task_allow_kill(t);
-            }
-        }
-        TaskContext => {
-            let t = Local::unsafe_borrow::<Task>();
-            do (|| {
-                // It's important to inhibit kill after inhibiting yield, because
-                // inhibit-kill might fail if we were already killed, and the
-                // inhibit-yield must happen to match the finally's allow-yield.
-                (*t).death.inhibit_yield();
-                (*t).death.inhibit_kill((*t).unwinder.unwinding);
-                f()
-            }).finally {
-                (*t).death.allow_kill((*t).unwinder.unwinding);
-                (*t).death.allow_yield();
-            }
-        }
-        // FIXME(#3095): As in unkillable().
-        _ => f()
-    }
-}
-
 #[test] #[should_fail] #[ignore(cfg(windows))]
 fn test_cant_dup_task_builder() {
     let mut builder = task();

@@ -1177,21 +1139,6 @@ fn test_unkillable_nested() {
     po.recv();
 }
 
-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically() {
-    unsafe { do atomically { yield(); } }
-}
-
-#[test]
-fn test_atomically2() {
-    unsafe { do atomically { } } yield(); // shouldn't fail
-}
-
-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically_nested() {
-    unsafe { do atomically { do atomically { } yield(); } }
-}
-
 #[test]
 fn test_child_doesnt_ref_parent() {
     // If the child refcounts the parent task, this will stack overflow when
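
Note that the deleted body (re-added in unstable/sync.rs below, minus the kill-inhibition in the TaskContext arm) leans on the (|| { ... }).finally { ... } adaptor from unstable::finally to guarantee the allow-calls run even if the closure fails. In isolation the idiom looks roughly like this standalone sketch (not code from this commit; assumes the external path std::unstable::finally::Finally):

    use std::unstable::finally::Finally;

    fn main() {
        do (|| {
            println("inhibit something, then do work that might fail");
        }).finally {
            println("allow it again; this runs on normal exit and on unwinding");
        }
    }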

src/libstd/unstable/dynamic_lib.rs

+4 -4

@@ -105,7 +105,7 @@ mod dl {
     use path;
     use ptr;
     use str;
-    use task;
+    use unstable::sync::atomically;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {

@@ -120,7 +120,7 @@
 
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 let _old_error = dlerror();
 
                 let result = f();

@@ -164,7 +164,7 @@ mod dl {
     use libc;
     use path;
     use ptr;
-    use task;
+    use unstable::sync::atomically;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {

@@ -181,7 +181,7 @@
 
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 SetLastError(0);
 
                 let result = f();
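
Both platform variants of check_for_errors_in keep the clear-error / call / read-error sequence inside one atomically block, so no other task can be scheduled onto the same OS thread and disturb the thread-local error state in between. A hypothetical caller might look roughly like this (handle and name are free variables introduced purely for illustration, and dlsym stands in for any libdl call whose failure is reported via dlerror()):

    // Illustrative only: assumes `handle` (a dlopen'd library) and `name`
    // (a C-string symbol name) are already in scope inside `mod dl`.
    let sym: Result<*libc::c_void, ~str> = do check_for_errors_in {
        unsafe { dlsym(handle, name) }
    };
    match sym {
        Ok(_ptr) => { /* use the raw symbol pointer */ }
        Err(msg) => fail!(msg),
    }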

src/libstd/unstable/mod.rs

+2 -2

@@ -85,15 +85,15 @@ fn test_run_in_bare_thread_exchange() {
 pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
     use os;
     use os::change_dir;
-    use task;
+    use unstable::sync::atomically;
     use unstable::finally::Finally;
 
     unsafe {
         // This is really sketchy. Using a pthread mutex so descheduling
         // in the `action` callback can cause deadlock. Doing it in
         // `task::atomically` to try to avoid that, but ... I don't know
         // this is all bogus.
-        return do task::atomically {
+        return do atomically {
             rust_take_change_dir_lock();
 
             do (||{
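
change_dir_locked is the same pattern one level up: take the process-global pthread lock, switch directories, run the callback, and switch back, all without yielding. A rough usage sketch (the target directory is arbitrary; the bool result is assumed to report whether the initial change_dir succeeded):

    use std::os;
    use std::unstable::change_dir_locked;

    fn main() {
        // Degenerate but safe example: "change" into the directory we are already in.
        let here = os::getcwd();
        let ok = do change_dir_locked(&here) {
            // cwd is `here` for the duration; don't yield/recv/spawn in this block.
        };
        assert!(ok);
    }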

src/libstd/unstable/sync.rs

+50 -2

@@ -16,7 +16,6 @@ use ptr;
 use option::*;
 use either::{Either, Left, Right};
 use task;
-use task::atomically;
 use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
 use unstable::finally::Finally;
 use ops::Drop;

@@ -271,6 +270,48 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
 
 /****************************************************************************/
 
+/**
+ * Enables a runtime assertion that no operation in the argument closure shall
+ * use scheduler operations (yield, recv, spawn, etc). This is for use with
+ * pthread mutexes, which may block the entire scheduler thread, rather than
+ * just one task, and is hence prone to deadlocks if mixed with yielding.
+ *
+ * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
+ * synchronization whatsoever. It only makes sense to use for CPU-local issues.
+ */
+// FIXME(#8140) should not be pub
+pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
+    use rt::task::Task;
+    use task::rt;
+    use rt::local::Local;
+    use rt::{context, OldTaskContext, TaskContext};
+
+    match context() {
+        OldTaskContext => {
+            let t = rt::rust_get_task();
+            do (|| {
+                rt::rust_task_inhibit_kill(t);
+                rt::rust_task_inhibit_yield(t);
+                f()
+            }).finally {
+                rt::rust_task_allow_yield(t);
+                rt::rust_task_allow_kill(t);
+            }
+        }
+        TaskContext => {
+            let t = Local::unsafe_borrow::<Task>();
+            do (|| {
+                (*t).death.inhibit_yield();
+                f()
+            }).finally {
+                (*t).death.allow_yield();
+            }
+        }
+        // FIXME(#3095): As in unkillable().
+        _ => f()
+    }
+}
+
 #[allow(non_camel_case_types)] // runtime type
 type rust_little_lock = *libc::c_void;

@@ -395,11 +436,18 @@ mod tests {
     use cell::Cell;
     use comm;
     use option::*;
-    use super::{Exclusive, UnsafeAtomicRcBox};
+    use super::{Exclusive, UnsafeAtomicRcBox, atomically};
     use task;
     use uint;
     use util;
 
+    #[test]
+    fn test_atomically() {
+        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
+        // so we can't really test for the converse behaviour.
+        unsafe { do atomically { } } task::yield(); // oughtn't fail
+    }
+
     #[test]
     fn exclusive_new_arc() {
         unsafe {
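
Putting the pieces together: enter atomically, do the pthread-level work, leave, and only then touch the scheduler again, which is exactly what the new test above exercises. A minimal external-crate sketch, assuming the path std::unstable::sync::atomically stays reachable (the FIXME notes it should eventually stop being pub):

    use std::task;
    use std::unstable::sync::atomically;

    fn main() {
        unsafe {
            do atomically {
                // pthread-mutex-style critical work goes here; any yield, recv
                // or spawn inside this block trips the runtime assertion.
            }
        }
        task::yield();   // fine again once the block has been exited
    }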

0 commit comments
