Skip to content

Commit 418e1eb

Browse files
committed
Reserve 'yield' keyword
Rename task::yield() to task::deschedule(). Fixes #8494.
1 parent 680eb71 commit 418e1eb

23 files changed

+75
-72
lines changed

src/libextra/arc.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -762,7 +762,7 @@ mod tests {
762762
do 10.times {
763763
let tmp = *num;
764764
*num = -1;
765-
task::yield();
765+
task::deschedule();
766766
*num = tmp + 1;
767767
}
768768
c.send(());
@@ -913,17 +913,17 @@ mod tests {
913913
do read_mode.read |state| {
914914
// if writer mistakenly got in, make sure it mutates state
915915
// before we assert on it
916-
do 5.times { task::yield(); }
916+
do 5.times { task::deschedule(); }
917917
// make sure writer didn't get in.
918918
assert!(*state);
919919
}
920920
}
921921
}
922922
#[test]
923923
fn test_rw_write_cond_downgrade_read_race() {
924-
// Ideally the above test case would have yield statements in it that
924+
// Ideally the above test case would have deschedule statements in it that
925925
// helped to expose the race nearly 100% of the time... but adding
926-
// yields in the intuitively-right locations made it even less likely,
926+
// deschedules in the intuitively-right locations made it even less likely,
927927
// and I wasn't sure why :( . This is a mediocre "next best" option.
928928
do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
929929
}

src/libextra/sync.rs

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ impl<Q:Send> Sem<Q> {
112112
}
113113
}
114114
// Uncomment if you wish to test for sem races. Not valgrind-friendly.
115-
/* do 1000.times { task::yield(); } */
115+
/* do 1000.times { task::deschedule(); } */
116116
// Need to wait outside the exclusive.
117117
if waiter_nobe.is_some() {
118118
let _ = waiter_nobe.unwrap().recv();
@@ -225,7 +225,7 @@ impl<'self> Condvar<'self> {
225225
}
226226
}
227227

228-
// If yield checks start getting inserted anywhere, we can be
228+
// If deschedule checks start getting inserted anywhere, we can be
229229
// killed before or after enqueueing. Deciding whether to
230230
// unkillably reacquire the lock needs to happen atomically
231231
// wrt enqueuing.
@@ -731,11 +731,11 @@ mod tests {
731731
let s2 = ~s.clone();
732732
do task::spawn || {
733733
do s2.access {
734-
do 5.times { task::yield(); }
734+
do 5.times { task::deschedule(); }
735735
}
736736
}
737737
do s.access {
738-
do 5.times { task::yield(); }
738+
do 5.times { task::deschedule(); }
739739
}
740740
}
741741
#[test]
@@ -748,7 +748,7 @@ mod tests {
748748
s2.acquire();
749749
c.send(());
750750
}
751-
do 5.times { task::yield(); }
751+
do 5.times { task::deschedule(); }
752752
s.release();
753753
let _ = p.recv();
754754

@@ -757,7 +757,7 @@ mod tests {
757757
let s = ~Semaphore::new(0);
758758
let s2 = ~s.clone();
759759
do task::spawn || {
760-
do 5.times { task::yield(); }
760+
do 5.times { task::deschedule(); }
761761
s2.release();
762762
let _ = p.recv();
763763
}
@@ -800,7 +800,7 @@ mod tests {
800800
c.send(());
801801
}
802802
let _ = p.recv(); // wait for child to come alive
803-
do 5.times { task::yield(); } // let the child contend
803+
do 5.times { task::deschedule(); } // let the child contend
804804
}
805805
let _ = p.recv(); // wait for child to be done
806806
}
@@ -837,7 +837,7 @@ mod tests {
837837
do n.times {
838838
do m.lock {
839839
let oldval = *sharedstate;
840-
task::yield();
840+
task::deschedule();
841841
*sharedstate = oldval + 1;
842842
}
843843
}
@@ -948,7 +948,7 @@ mod tests {
948948
let (p,c) = comm::stream();
949949
do task::spawn || { // linked
950950
let _ = p.recv(); // wait for sibling to get in the mutex
951-
task::yield();
951+
task::deschedule();
952952
fail!();
953953
}
954954
do m2.lock_cond |cond| {
@@ -1114,7 +1114,7 @@ mod tests {
11141114
do n.times {
11151115
do lock_rwlock_in_mode(x, mode) {
11161116
let oldval = *sharedstate;
1117-
task::yield();
1117+
task::deschedule();
11181118
*sharedstate = oldval + 1;
11191119
}
11201120
}

src/libstd/rt/kill.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ pub struct Death {
141141
on_exit: Option<~fn(bool)>,
142142
// nesting level counter for task::unkillable calls (0 == killable).
143143
unkillable: int,
144-
// nesting level counter for unstable::atomically calls (0 == can yield).
144+
// nesting level counter for unstable::atomically calls (0 == can deschedule).
145145
wont_sleep: int,
146146
// A "spare" handle to the kill flag inside the kill handle. Used during
147147
// blocking/waking as an optimization to avoid two xadds on the refcount.
@@ -572,16 +572,16 @@ impl Death {
572572
}
573573

574574
/// Enter a possibly-nested "atomic" section of code. Just for assertions.
575-
/// All calls must be paired with a subsequent call to allow_yield.
575+
/// All calls must be paired with a subsequent call to allow_deschedule.
576576
#[inline]
577-
pub fn inhibit_yield(&mut self) {
577+
pub fn inhibit_deschedule(&mut self) {
578578
self.wont_sleep += 1;
579579
}
580580

581581
/// Exit a possibly-nested "atomic" section of code. Just for assertions.
582-
/// All calls must be paired with a preceding call to inhibit_yield.
582+
/// All calls must be paired with a preceding call to inhibit_deschedule.
583583
#[inline]
584-
pub fn allow_yield(&mut self) {
584+
pub fn allow_deschedule(&mut self) {
585585
rtassert!(self.wont_sleep != 0);
586586
self.wont_sleep -= 1;
587587
}

src/libstd/select.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ mod test {
250250
let (c2, p3, c4) = x.take();
251251
p3.recv(); // handshake parent
252252
c4.send(()); // normal receive
253-
task::yield();
253+
task::deschedule();
254254
c2.send(()); // select receive
255255
}
256256

@@ -294,7 +294,7 @@ mod test {
294294
if send_on_chans.contains(&i) {
295295
let c = Cell::new(c);
296296
do spawntask_random {
297-
task::yield();
297+
task::deschedule();
298298
c.take().send(());
299299
}
300300
}

src/libstd/task/mod.rs

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -537,7 +537,7 @@ pub fn with_task_name<U>(blk: &fn(Option<&str>) -> U) -> U {
537537
}
538538
}
539539

540-
pub fn yield() {
540+
pub fn deschedule() {
541541
//! Yield control to the task scheduler
542542
543543
use rt::local::Local;
@@ -568,10 +568,10 @@ pub fn failing() -> bool {
568568
*
569569
* ~~~
570570
* do task::unkillable {
571-
* // detach / yield / destroy must all be called together
571+
* // detach / deschedule / destroy must all be called together
572572
* rustrt::rust_port_detach(po);
573573
* // This must not result in the current task being killed
574-
* task::yield();
574+
* task::deschedule();
575575
* rustrt::rust_port_destroy(po);
576576
* }
577577
* ~~~
@@ -689,7 +689,7 @@ fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port
689689
let ch = ch.clone();
690690
do spawn_unlinked {
691691
// Give middle task a chance to fail-but-not-kill-us.
692-
do 16.times { task::yield(); }
692+
do 16.times { task::deschedule(); }
693693
ch.send(()); // If killed first, grandparent hangs.
694694
}
695695
fail!(); // Shouldn't kill either (grand)parent or (grand)child.
@@ -712,7 +712,7 @@ fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails
712712
do run_in_newsched_task {
713713
do spawn_supervised { fail!(); }
714714
// Give child a chance to fail-but-not-kill-us.
715-
do 16.times { task::yield(); }
715+
do 16.times { task::deschedule(); }
716716
}
717717
}
718718
#[ignore(reason = "linked failure")]
@@ -821,7 +821,7 @@ fn test_spawn_failure_propagate_grandchild() {
821821
do spawn_supervised {
822822
do spawn_supervised { block_forever(); }
823823
}
824-
do 16.times { task::yield(); }
824+
do 16.times { task::deschedule(); }
825825
fail!();
826826
};
827827
assert!(result.is_err());
@@ -838,7 +838,7 @@ fn test_spawn_failure_propagate_secondborn() {
838838
do spawn_supervised {
839839
do spawn { block_forever(); } // linked
840840
}
841-
do 16.times { task::yield(); }
841+
do 16.times { task::deschedule(); }
842842
fail!();
843843
};
844844
assert!(result.is_err());
@@ -855,7 +855,7 @@ fn test_spawn_failure_propagate_nephew_or_niece() {
855855
do spawn { // linked
856856
do spawn_supervised { block_forever(); }
857857
}
858-
do 16.times { task::yield(); }
858+
do 16.times { task::deschedule(); }
859859
fail!();
860860
};
861861
assert!(result.is_err());
@@ -872,7 +872,7 @@ fn test_spawn_linked_sup_propagate_sibling() {
872872
do spawn { // linked
873873
do spawn { block_forever(); } // linked
874874
}
875-
do 16.times { task::yield(); }
875+
do 16.times { task::deschedule(); }
876876
fail!();
877877
};
878878
assert!(result.is_err());
@@ -1169,12 +1169,12 @@ fn test_unkillable() {
11691169

11701170
// We want to do this after failing
11711171
do spawn_unlinked {
1172-
do 10.times { yield() }
1172+
do 10.times { deschedule() }
11731173
ch.send(());
11741174
}
11751175

11761176
do spawn {
1177-
yield();
1177+
deschedule();
11781178
// We want to fail after the unkillable task
11791179
// blocks on recv
11801180
fail!();
@@ -1205,12 +1205,12 @@ fn test_unkillable_nested() {
12051205

12061206
// We want to do this after failing
12071207
do spawn_unlinked || {
1208-
do 10.times { yield() }
1208+
do 10.times { deschedule() }
12091209
ch.send(());
12101210
}
12111211

12121212
do spawn {
1213-
yield();
1213+
deschedule();
12141214
// We want to fail after the unkillable task
12151215
// blocks on recv
12161216
fail!();
@@ -1277,7 +1277,7 @@ fn test_spawn_watched() {
12771277
t.unlinked();
12781278
t.watched();
12791279
do t.spawn {
1280-
task::yield();
1280+
task::deschedule();
12811281
fail!();
12821282
}
12831283
}
@@ -1313,7 +1313,7 @@ fn test_indestructible() {
13131313
t.unwatched();
13141314
do t.spawn {
13151315
p3.recv();
1316-
task::yield();
1316+
task::deschedule();
13171317
fail!();
13181318
}
13191319
c3.send(());

src/libstd/unstable/sync.rs

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -272,9 +272,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
272272

273273
/**
274274
* Enables a runtime assertion that no operation in the argument closure shall
275-
* use scheduler operations (yield, recv, spawn, etc). This is for use with
275+
* use scheduler operations (deschedule, recv, spawn, etc). This is for use with
276276
* pthread mutexes, which may block the entire scheduler thread, rather than
277-
* just one task, and is hence prone to deadlocks if mixed with yielding.
277+
* just one task, and is hence prone to deadlocks if mixed with descheduling.
278278
*
279279
* NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
280280
* synchronization whatsoever. It only makes sense to use for CPU-local issues.
@@ -288,10 +288,10 @@ pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
288288
if in_green_task_context() {
289289
let t = Local::unsafe_borrow::<Task>();
290290
do (|| {
291-
(*t).death.inhibit_yield();
291+
(*t).death.inhibit_deschedule();
292292
f()
293293
}).finally {
294-
(*t).death.allow_yield();
294+
(*t).death.allow_deschedule();
295295
}
296296
} else {
297297
f()
@@ -349,7 +349,7 @@ struct ExData<T> {
349349
* This uses a pthread mutex, not one that's aware of the userspace scheduler.
350350
* The user of an Exclusive must be careful not to invoke any functions that may
351351
* reschedule the task while holding the lock, or deadlock may result. If you
352-
* need to block or yield while accessing shared state, use extra::sync::RWArc.
352+
* need to block or deschedule while accessing shared state, use extra::sync::RWArc.
353353
*/
354354
pub struct Exclusive<T> {
355355
x: UnsafeAtomicRcBox<ExData<T>>
@@ -377,7 +377,7 @@ impl<T:Send> Exclusive<T> {
377377
// Exactly like std::arc::MutexArc::access(), but with the LittleLock
378378
// instead of a proper mutex. Same reason for being unsafe.
379379
//
380-
// Currently, scheduling operations (i.e., yielding, receiving on a pipe,
380+
// Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
381381
// accessing the provided condition variable) are prohibited while inside
382382
// the Exclusive. Supporting that is a work in progress.
383383
#[inline]
@@ -431,7 +431,7 @@ mod tests {
431431
fn test_atomically() {
432432
// NB. The whole runtime will abort on an 'atomic-sleep' violation,
433433
// so we can't really test for the converse behaviour.
434-
unsafe { do atomically { } } task::yield(); // oughtn't fail
434+
unsafe { do atomically { } } task::deschedule(); // oughtn't fail
435435
}
436436

437437
#[test]
@@ -545,7 +545,7 @@ mod tests {
545545
c.send(());
546546
}
547547
p.recv();
548-
task::yield(); // Try to make the unwrapper get blocked first.
548+
task::deschedule(); // Try to make the unwrapper get blocked first.
549549
let left_x = x.try_unwrap();
550550
assert!(left_x.is_left());
551551
util::ignore(left_x);
@@ -566,7 +566,7 @@ mod tests {
566566
do task::spawn {
567567
let x2 = x2.take();
568568
unsafe { do x2.with |_hello| { } }
569-
task::yield();
569+
task::deschedule();
570570
}
571571
assert!(x.unwrap() == ~~"hello");
572572
@@ -612,7 +612,7 @@ mod tests {
612612
let x = Exclusive::new(~~"hello");
613613
let x2 = x.clone();
614614
do task::spawn {
615-
do 10.times { task::yield(); } // try to let the unwrapper go
615+
do 10.times { task::deschedule(); } // try to let the unwrapper go
616616
fail!(); // punt it awake from its deadlock
617617
}
618618
let _z = x.unwrap();

0 commit comments

Comments (0)