Skip to content

Reserve 'yield' keyword #8560

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions src/libextra/arc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -762,7 +762,7 @@ mod tests {
do 10.times {
let tmp = *num;
*num = -1;
task::yield();
task::deschedule();
*num = tmp + 1;
}
c.send(());
Expand Down Expand Up @@ -913,17 +913,17 @@ mod tests {
do read_mode.read |state| {
// if writer mistakenly got in, make sure it mutates state
// before we assert on it
do 5.times { task::yield(); }
do 5.times { task::deschedule(); }
// make sure writer didn't get in.
assert!(*state);
}
}
}
#[test]
fn test_rw_write_cond_downgrade_read_race() {
// Ideally the above test case would have yield statements in it that
// Ideally the above test case would have deschedule statements in it that
// helped to expose the race nearly 100% of the time... but adding
// yields in the intuitively-right locations made it even less likely,
// deschedules in the intuitively-right locations made it even less likely,
// and I wasn't sure why :( . This is a mediocre "next best" option.
do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
}
Expand Down
20 changes: 10 additions & 10 deletions src/libextra/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ impl<Q:Send> Sem<Q> {
}
}
// Uncomment if you wish to test for sem races. Not valgrind-friendly.
/* do 1000.times { task::yield(); } */
/* do 1000.times { task::deschedule(); } */
// Need to wait outside the exclusive.
if waiter_nobe.is_some() {
let _ = waiter_nobe.unwrap().recv();
Expand Down Expand Up @@ -225,7 +225,7 @@ impl<'self> Condvar<'self> {
}
}

// If yield checks start getting inserted anywhere, we can be
// If deschedule checks start getting inserted anywhere, we can be
// killed before or after enqueueing. Deciding whether to
// unkillably reacquire the lock needs to happen atomically
// wrt enqueuing.
Expand Down Expand Up @@ -731,11 +731,11 @@ mod tests {
let s2 = ~s.clone();
do task::spawn || {
do s2.access {
do 5.times { task::yield(); }
do 5.times { task::deschedule(); }
}
}
do s.access {
do 5.times { task::yield(); }
do 5.times { task::deschedule(); }
}
}
#[test]
Expand All @@ -748,7 +748,7 @@ mod tests {
s2.acquire();
c.send(());
}
do 5.times { task::yield(); }
do 5.times { task::deschedule(); }
s.release();
let _ = p.recv();

Expand All @@ -757,7 +757,7 @@ mod tests {
let s = ~Semaphore::new(0);
let s2 = ~s.clone();
do task::spawn || {
do 5.times { task::yield(); }
do 5.times { task::deschedule(); }
s2.release();
let _ = p.recv();
}
Expand Down Expand Up @@ -800,7 +800,7 @@ mod tests {
c.send(());
}
let _ = p.recv(); // wait for child to come alive
do 5.times { task::yield(); } // let the child contend
do 5.times { task::deschedule(); } // let the child contend
}
let _ = p.recv(); // wait for child to be done
}
Expand Down Expand Up @@ -837,7 +837,7 @@ mod tests {
do n.times {
do m.lock {
let oldval = *sharedstate;
task::yield();
task::deschedule();
*sharedstate = oldval + 1;
}
}
Expand Down Expand Up @@ -948,7 +948,7 @@ mod tests {
let (p,c) = comm::stream();
do task::spawn || { // linked
let _ = p.recv(); // wait for sibling to get in the mutex
task::yield();
task::deschedule();
fail!();
}
do m2.lock_cond |cond| {
Expand Down Expand Up @@ -1114,7 +1114,7 @@ mod tests {
do n.times {
do lock_rwlock_in_mode(x, mode) {
let oldval = *sharedstate;
task::yield();
task::deschedule();
*sharedstate = oldval + 1;
}
}
Expand Down
10 changes: 5 additions & 5 deletions src/libstd/rt/kill.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ pub struct Death {
on_exit: Option<~fn(bool)>,
// nesting level counter for task::unkillable calls (0 == killable).
unkillable: int,
// nesting level counter for unstable::atomically calls (0 == can yield).
// nesting level counter for unstable::atomically calls (0 == can deschedule).
wont_sleep: int,
// A "spare" handle to the kill flag inside the kill handle. Used during
// blocking/waking as an optimization to avoid two xadds on the refcount.
Expand Down Expand Up @@ -572,16 +572,16 @@ impl Death {
}

/// Enter a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a subsequent call to allow_yield.
/// All calls must be paired with a subsequent call to allow_deschedule.
#[inline]
pub fn inhibit_yield(&mut self) {
pub fn inhibit_deschedule(&mut self) {
self.wont_sleep += 1;
}

/// Exit a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a preceding call to inhibit_yield.
/// All calls must be paired with a preceding call to inhibit_deschedule.
#[inline]
pub fn allow_yield(&mut self) {
pub fn allow_deschedule(&mut self) {
rtassert!(self.wont_sleep != 0);
self.wont_sleep -= 1;
}
Expand Down
4 changes: 2 additions & 2 deletions src/libstd/select.rs
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ mod test {
let (c2, p3, c4) = x.take();
p3.recv(); // handshake parent
c4.send(()); // normal receive
task::yield();
task::deschedule();
c2.send(()); // select receive
}

Expand Down Expand Up @@ -294,7 +294,7 @@ mod test {
if send_on_chans.contains(&i) {
let c = Cell::new(c);
do spawntask_random {
task::yield();
task::deschedule();
c.take().send(());
}
}
Expand Down
30 changes: 15 additions & 15 deletions src/libstd/task/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -537,7 +537,7 @@ pub fn with_task_name<U>(blk: &fn(Option<&str>) -> U) -> U {
}
}

pub fn yield() {
pub fn deschedule() {
//! Yield control to the task scheduler

use rt::local::Local;
Expand Down Expand Up @@ -568,10 +568,10 @@ pub fn failing() -> bool {
*
* ~~~
* do task::unkillable {
* // detach / yield / destroy must all be called together
* // detach / deschedule / destroy must all be called together
* rustrt::rust_port_detach(po);
* // This must not result in the current task being killed
* task::yield();
* task::deschedule();
* rustrt::rust_port_destroy(po);
* }
* ~~~
Expand Down Expand Up @@ -689,7 +689,7 @@ fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port
let ch = ch.clone();
do spawn_unlinked {
// Give middle task a chance to fail-but-not-kill-us.
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
ch.send(()); // If killed first, grandparent hangs.
}
fail!(); // Shouldn't kill either (grand)parent or (grand)child.
Expand All @@ -712,7 +712,7 @@ fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails
do run_in_newsched_task {
do spawn_supervised { fail!(); }
// Give child a chance to fail-but-not-kill-us.
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
}
}
#[ignore(reason = "linked failure")]
Expand Down Expand Up @@ -821,7 +821,7 @@ fn test_spawn_failure_propagate_grandchild() {
do spawn_supervised {
do spawn_supervised { block_forever(); }
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
Expand All @@ -838,7 +838,7 @@ fn test_spawn_failure_propagate_secondborn() {
do spawn_supervised {
do spawn { block_forever(); } // linked
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
Expand All @@ -855,7 +855,7 @@ fn test_spawn_failure_propagate_nephew_or_niece() {
do spawn { // linked
do spawn_supervised { block_forever(); }
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
Expand All @@ -872,7 +872,7 @@ fn test_spawn_linked_sup_propagate_sibling() {
do spawn { // linked
do spawn { block_forever(); } // linked
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
Expand Down Expand Up @@ -1169,12 +1169,12 @@ fn test_unkillable() {

// We want to do this after failing
do spawn_unlinked {
do 10.times { yield() }
do 10.times { deschedule() }
ch.send(());
}

do spawn {
yield();
deschedule();
// We want to fail after the unkillable task
// blocks on recv
fail!();
Expand Down Expand Up @@ -1205,12 +1205,12 @@ fn test_unkillable_nested() {

// We want to do this after failing
do spawn_unlinked || {
do 10.times { yield() }
do 10.times { deschedule() }
ch.send(());
}

do spawn {
yield();
deschedule();
// We want to fail after the unkillable task
// blocks on recv
fail!();
Expand Down Expand Up @@ -1277,7 +1277,7 @@ fn test_spawn_watched() {
t.unlinked();
t.watched();
do t.spawn {
task::yield();
task::deschedule();
fail!();
}
}
Expand Down Expand Up @@ -1313,7 +1313,7 @@ fn test_indestructible() {
t.unwatched();
do t.spawn {
p3.recv();
task::yield();
task::deschedule();
fail!();
}
c3.send(());
Expand Down
20 changes: 10 additions & 10 deletions src/libstd/unstable/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -272,9 +272,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{

/**
* Enables a runtime assertion that no operation in the argument closure shall
* use scheduler operations (yield, recv, spawn, etc). This is for use with
* use scheduler operations (deschedule, recv, spawn, etc). This is for use with
* pthread mutexes, which may block the entire scheduler thread, rather than
* just one task, and is hence prone to deadlocks if mixed with yielding.
* just one task, and is hence prone to deadlocks if mixed with descheduling.
*
* NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
* synchronization whatsoever. It only makes sense to use for CPU-local issues.
Expand All @@ -288,10 +288,10 @@ pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
if in_green_task_context() {
let t = Local::unsafe_borrow::<Task>();
do (|| {
(*t).death.inhibit_yield();
(*t).death.inhibit_deschedule();
f()
}).finally {
(*t).death.allow_yield();
(*t).death.allow_deschedule();
}
} else {
f()
Expand Down Expand Up @@ -349,7 +349,7 @@ struct ExData<T> {
* This uses a pthread mutex, not one that's aware of the userspace scheduler.
* The user of an Exclusive must be careful not to invoke any functions that may
* reschedule the task while holding the lock, or deadlock may result. If you
* need to block or yield while accessing shared state, use extra::sync::RWArc.
* need to block or deschedule while accessing shared state, use extra::sync::RWArc.
*/
pub struct Exclusive<T> {
x: UnsafeAtomicRcBox<ExData<T>>
Expand Down Expand Up @@ -377,7 +377,7 @@ impl<T:Send> Exclusive<T> {
// Exactly like std::arc::MutexArc::access(), but with the LittleLock
// instead of a proper mutex. Same reason for being unsafe.
//
// Currently, scheduling operations (i.e., yielding, receiving on a pipe,
// Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
// accessing the provided condition variable) are prohibited while inside
// the Exclusive. Supporting that is a work in progress.
#[inline]
Expand Down Expand Up @@ -431,7 +431,7 @@ mod tests {
fn test_atomically() {
// NB. The whole runtime will abort on an 'atomic-sleep' violation,
// so we can't really test for the converse behaviour.
unsafe { do atomically { } } task::yield(); // oughtn't fail
unsafe { do atomically { } } task::deschedule(); // oughtn't fail
}

#[test]
Expand Down Expand Up @@ -545,7 +545,7 @@ mod tests {
c.send(());
}
p.recv();
task::yield(); // Try to make the unwrapper get blocked first.
task::deschedule(); // Try to make the unwrapper get blocked first.
let left_x = x.try_unwrap();
assert!(left_x.is_left());
util::ignore(left_x);
Expand All @@ -566,7 +566,7 @@ mod tests {
do task::spawn {
let x2 = x2.take();
unsafe { do x2.with |_hello| { } }
task::yield();
task::deschedule();
}
assert!(x.unwrap() == ~~"hello");

Expand Down Expand Up @@ -612,7 +612,7 @@ mod tests {
let x = Exclusive::new(~~"hello");
let x2 = x.clone();
do task::spawn {
do 10.times { task::yield(); } // try to let the unwrapper go
do 10.times { task::deschedule(); } // try to let the unwrapper go
fail!(); // punt it awake from its deadlock
}
let _z = x.unwrap();
Expand Down
Loading