Temporary workaround to prevent taskgroup cleanup code from failing without an exception handler.

Ben Blum 2013-07-31 19:48:38 -04:00
parent aeaed77301
commit 963d37e821
2 changed files with 13 additions and 3 deletions


@@ -72,6 +72,7 @@ use either::{Either, Left, Right};
 use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
+use task::spawn::Taskgroup;
 use to_bytes::IterBytes;
 use unstable::atomics::{AtomicUint, Relaxed};
 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
@@ -474,7 +475,7 @@ impl Death {
     }
 
     /// Collect failure exit codes from children and propagate them to a parent.
-    pub fn collect_failure(&mut self, mut success: bool) {
+    pub fn collect_failure(&mut self, mut success: bool, group: Option<Taskgroup>) {
         // This may run after the task has already failed, so even though the
         // task appears to need to be killed, the scheduler should not fail us
         // when we block to unwrap.
@@ -484,6 +485,10 @@
         rtassert!(self.unkillable == 0);
         self.unkillable = 1;
 
+        // FIXME(#7544): See corresponding fixme at the callsite in task.rs.
+        // NB(#8192): Doesn't work with "let _ = ..."
+        { use util; util::ignore(group); }
+
         // Step 1. Decide if we need to collect child failures synchronously.
         do self.on_exit.take_map |on_exit| {
             if success {
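The NB above refers to how the taskgroup is disposed of: it is dropped by moving it into `util::ignore`, which takes its argument by value, so the destructor runs at that exact point while `unkillable` is still set. As the NB says, `let _ = ...` does not do the job; in Rust a wildcard `let _ =` binding does not move the value, so its destructor would only run at the end of the enclosing scope. A minimal sketch of that move-and-drop behaviour in present-day Rust follows; the `Taskgroup` type and `ignore` helper here are simplified stand-ins, not the actual libstd items.

// Minimal sketch (present-day Rust) of the move-and-drop pattern used above.
// `Taskgroup` is a stand-in type, not the real libstd one.
struct Taskgroup;

impl Drop for Taskgroup {
    fn drop(&mut self) {
        println!("taskgroup destructor ran");
    }
}

// Stand-in for the old `util::ignore`; modern code would just call `std::mem::drop`.
fn ignore<T>(_t: T) {}

fn main() {
    let group = Some(Taskgroup);
    ignore(group);            // the Option (and the Taskgroup inside) is dropped here
    println!("after ignore"); // prints after the destructor message above
}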


@@ -129,8 +129,13 @@ impl Task {
         }
         self.unwinder.try(f);
-        { let _ = self.taskgroup.take(); }
-        self.death.collect_failure(!self.unwinder.unwinding);
+        // FIXME(#7544): We pass the taskgroup into death so that it can be
+        // dropped while the unkillable counter is set. This should not be
+        // necessary except for an extraneous clone() in task/spawn.rs that
+        // causes a killhandle to get dropped, which mustn't receive a kill
+        // signal since we're outside of the unwinder's try() scope.
+        // { let _ = self.taskgroup.take(); }
+        self.death.collect_failure(!self.unwinder.unwinding, self.taskgroup.take());
         self.destroy();
     }
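The FIXME describes the shape of the workaround: rather than dropping the taskgroup at the call site, outside the unwinder's try() scope, it is moved into collect_failure so that the killhandle teardown it triggers happens while the unkillable counter is set. A rough sketch of that "drop inside a guarded window" idea in present-day Rust is below; `Death`, `KillHandle`, and the `unkillable` field are simplified stand-ins for the real types, not the actual implementation.

// Rough sketch of the workaround's structure; all names here are illustrative.
struct KillHandle;

impl Drop for KillHandle {
    fn drop(&mut self) {
        // In the real code this teardown could observe a kill signal; running it
        // inside the guarded window keeps that signal from failing the task.
        println!("kill handle torn down");
    }
}

struct Death {
    unkillable: u32, // simplified stand-in for the real guard counter
}

impl Death {
    // Taking ownership of the handle means its destructor runs in here, between
    // setting and clearing `unkillable`, rather than back at the call site.
    fn collect_failure(&mut self, _success: bool, handle: Option<KillHandle>) {
        self.unkillable = 1;
        drop(handle);
        self.unkillable = 0;
    }
}

fn main() {
    let mut death = Death { unkillable: 0 };
    death.collect_failure(true, Some(KillHandle));
    assert_eq!(death.unkillable, 0);
}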