diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs
index afb4cf3943aba..1952c35eb9df2 100644
--- a/src/libextra/sync.rs
+++ b/src/libextra/sync.rs
@@ -135,9 +135,7 @@ impl Sem {
         do task::unkillable {
             do (|| {
                 self.acquire();
-                unsafe {
-                    do task::rekillable { blk() }
-                }
+                do task::rekillable { blk() }
             }).finally {
                 self.release();
             }
@@ -234,10 +232,8 @@ impl<'self> Condvar<'self> {
             // signaller already sent -- I mean 'unconditionally' in contrast
             // with acquire().)
             do (|| {
-                unsafe {
-                    do task::rekillable {
-                        let _ = WaitEnd.take_unwrap().recv();
-                    }
+                do task::rekillable {
+                    let _ = WaitEnd.take_unwrap().recv();
                 }
             }).finally {
                 // Reacquire the condvar. Note this is back in the unkillable
@@ -516,14 +512,12 @@ impl RWLock {
     * 'write' from other tasks will run concurrently with this one.
     */
     pub fn write<U>(&self, blk: &fn() -> U) -> U {
-        unsafe {
-            do task::unkillable {
-                (&self.order_lock).acquire();
-                do (&self.access_lock).access {
-                    (&self.order_lock).release();
-                    do task::rekillable {
-                        blk()
-                    }
+        do task::unkillable {
+            (&self.order_lock).acquire();
+            do (&self.access_lock).access {
+                (&self.order_lock).release();
+                do task::rekillable {
+                    blk()
                 }
             }
         }
@@ -562,16 +556,14 @@ impl RWLock {
         // which can't happen until T2 finishes the downgrade-read entirely.
         // The astute reader will also note that making waking writers use the
         // order_lock is better for not starving readers.
-        unsafe {
-            do task::unkillable {
-                (&self.order_lock).acquire();
-                do (&self.access_lock).access_cond |cond| {
-                    (&self.order_lock).release();
-                    do task::rekillable {
-                        let opt_lock = Just(&self.order_lock);
-                        blk(&Condvar { sem: cond.sem, order: opt_lock,
-                                       token: NonCopyable::new() })
-                    }
+        do task::unkillable {
+            (&self.order_lock).acquire();
+            do (&self.access_lock).access_cond |cond| {
+                (&self.order_lock).release();
+                do task::rekillable {
+                    let opt_lock = Just(&self.order_lock);
+                    blk(&Condvar { sem: cond.sem, order: opt_lock,
+                                   token: NonCopyable::new() })
                 }
             }
         }
@@ -606,10 +598,8 @@ impl RWLock {
         (&self.access_lock).acquire();
         (&self.order_lock).release();
         do (|| {
-            unsafe {
-                do task::rekillable {
-                    blk(RWLockWriteMode { lock: self, token: NonCopyable::new() })
-                }
+            do task::rekillable {
+                blk(RWLockWriteMode { lock: self, token: NonCopyable::new() })
             }
         }).finally {
             let writer_or_last_reader;
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index b0b425e3aee4a..94df621ce7666 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -647,7 +647,11 @@ impl Death {
     /// All calls must be paired with a preceding call to inhibit_kill.
     #[inline]
     pub fn allow_kill(&mut self, already_failing: bool) {
-        rtassert!(self.unkillable != 0);
+        if self.unkillable == 0 {
+            // Decrement first so the finally-block's inhibit_kill rebalances the count while unwinding.
+            self.unkillable -= 1;
+            fail!("Cannot enter a rekillable() block without a surrounding unkillable()");
+        }
         self.unkillable -= 1;
         if self.unkillable == 0 {
             rtassert!(self.kill_handle.is_some());
diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs
index e76b81a904df2..f872c2614b9f6 100644
--- a/src/libstd/task/mod.rs
+++ b/src/libstd/task/mod.rs
@@ -597,21 +597,37 @@ pub fn unkillable<U>(f: &fn() -> U) -> U {
     }
 }
 
-/// The inverse of unkillable. Only ever to be used nested in unkillable().
-pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
+/**
+ * Makes a task killable again after it has been marked as unkillable.
+ * This is meant to be used only nested inside an unkillable block.
+ *
+ * # Example
+ *
+ * ~~~
+ * do task::unkillable {
+ *     do task::rekillable {
+ *         // Task is killable
+ *     }
+ *     // Task is unkillable again
+ * }
+ * ~~~
+ */
+pub fn rekillable<U>(f: &fn() -> U) -> U {
     use rt::task::Task;
-    if in_green_task_context() {
-        let t = Local::unsafe_borrow::<Task>();
-        do (|| {
-            (*t).death.allow_kill((*t).unwinder.unwinding);
+    unsafe {
+        if in_green_task_context() {
+            let t = Local::unsafe_borrow::<Task>();
+            do (|| {
+                (*t).death.allow_kill((*t).unwinder.unwinding);
+                f()
+            }).finally {
+                (*t).death.inhibit_kill((*t).unwinder.unwinding);
+            }
+        } else {
+            // FIXME(#3095): As in unkillable().
             f()
-        }).finally {
-            (*t).death.inhibit_kill((*t).unwinder.unwinding);
-        }
-    } else {
-        // FIXME(#3095): As in unkillable().
-        f()
+        }
     }
 }
@@ -636,8 +651,8 @@ fn test_kill_unkillable_task() {
     }
 }
 
-#[ignore(reason = "linked failure")]
 #[test]
+#[ignore(cfg(windows))]
 fn test_kill_rekillable_task() {
     use rt::test::*;
 
@@ -646,11 +661,9 @@ fn test_kill_rekillable_task() {
     do run_in_newsched_task {
         do task::try {
             do task::unkillable {
-                unsafe {
-                    do task::rekillable {
-                        do task::spawn {
-                            fail!();
-                        }
+                do task::rekillable {
+                    do task::spawn {
+                        fail!();
                     }
                 }
             }
@@ -658,7 +671,36 @@ fn test_kill_rekillable_task() {
     }
 }
 
-#[test] #[should_fail]
+#[test]
+#[should_fail]
+#[ignore(cfg(windows))]
+fn test_rekillable_not_nested() {
+    do rekillable {
+        // This should fail as soon as the rekillable block is entered,
+        // since rekillable() must be nested inside an unkillable() block.
+        deschedule();
+    }
+}
+
+
+#[test]
+#[ignore(cfg(windows))]
+fn test_rekillable_nested_failure() {
+    let result = do task::try {
+        do unkillable {
+            do rekillable {
+                let (port, chan) = comm::stream();
+                do task::spawn { chan.send(()); fail!(); }
+                port.recv(); // wait for the child task to start
+                port.recv(); // block forever, expect to get killed
+            }
+        }
+    };
+    assert!(result.is_err());
+}
+
+
+#[test] #[should_fail] #[ignore(cfg(windows))]
 fn test_cant_dup_task_builder() {
     let mut builder = task();
     builder.unlinked();
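Taken together, these changes make `task::rekillable` a safe function: misusing it now fails the calling task with a descriptive message instead of tripping a runtime `rtassert!`. Below is a minimal usage sketch, written in the same 2013-era Rust dialect as the patch (`do`-block syntax; it will not compile on modern Rust) and assuming only the `std::task` API shown above:

~~~
use std::task;

fn main() {
    do task::unkillable {
        // Kill signals are deferred throughout this block...
        do task::rekillable {
            // ...except in here, where they are honored again. Even if a
            // kill arrives and unwinding starts, rekillable()'s finally
            // block re-runs inhibit_kill(), keeping the counter balanced.
        }
        // Unkillable once more.
    }

    // By contrast, calling rekillable() with no surrounding unkillable()
    // now fails the task with "Cannot enter a rekillable() block without
    // a surrounding unkillable()".
}
~~~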