Rewrite reap_dead_tasks to never grab the sched lock before a task lock
Doing so contradicts the locking order used everywhere else and causes
deadlocks.

Un-XFAIL task-perf-spawnalot

Closes #854
This commit is contained in:
parent 25416bfae1
commit abdb6cd71b

3 changed files with 47 additions and 9 deletions
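The deadlock described above is the classic lock-order inversion: one code path took sched->lock and then a task->lock, while every other path takes task->lock and then sched->lock, so two threads can each hold one lock and wait forever on the other. A minimal sketch of that inversion, using std::mutex as a stand-in for the runtime's locks (names hypothetical, not the rt API):

    #include <mutex>

    std::mutex sched_lock; // stands in for rust_scheduler::lock
    std::mutex task_lock;  // stands in for rust_task::lock

    void reaper() {                  // old reap_dead_tasks order
        std::lock_guard<std::mutex> s(sched_lock);
        std::lock_guard<std::mutex> t(task_lock); // blocks if the other
    }                                             // thread holds task_lock

    void everyone_else() {           // order used everywhere else
        std::lock_guard<std::mutex> t(task_lock);
        std::lock_guard<std::mutex> s(sched_lock); // blocks if the reaper
    }                                              // holds sched_lock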
@@ -100,25 +100,66 @@ rust_scheduler::number_of_live_tasks() {
 void
 rust_scheduler::reap_dead_tasks(int id) {
     I(this, lock.lock_held_by_current_thread());
-    for (size_t i = 0; i < dead_tasks.length(); ) {
+    if (dead_tasks.length() == 0) {
+        return;
+    }
+
+    // First make a copy of the dead_tasks list with the lock held
+    size_t dead_tasks_len = dead_tasks.length();
+    rust_task **dead_tasks_copy = (rust_task**)
+        srv->malloc(sizeof(rust_task*) * dead_tasks_len);
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
         rust_task *task = dead_tasks[i];
+        dead_tasks_copy[i] = task;
+    }
+
+    // Now drop the lock and futz with the tasks. This avoids establishing
+    // a sched->lock then task->lock locking order, which would be devastating
+    // to performance.
+    lock.unlock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
         task->lock.lock();
         // Make sure this task isn't still running somewhere else...
         if (task->can_schedule(id)) {
             I(this, task->tasks_waiting_to_join.is_empty());
-            dead_tasks.remove(task);
             DLOG(this, task,
                  "deleting unreferenced dead task %s @0x%" PRIxPTR,
                  task->name, task);
             task->lock.unlock();
-            task->deref();
-            sync::decrement(kernel->live_tasks);
-            kernel->wakeup_schedulers();
-            continue;
+        } else {
+            task->lock.unlock();
+            dead_tasks_copy[i] = NULL;
+        }
+    }
+
+    // Now grab the lock again and remove the tasks that were truly dead
+    lock.lock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
+        if (task) {
+            dead_tasks.remove(task);
         }
-        task->lock.unlock();
-        ++i;
     }
+
+    // Now unlock again because we have to actually free the dead tasks,
+    // and that may end up wanting to lock the task and sched locks
+    // again (via target->send)
+    lock.unlock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
+        if (task) {
+            task->deref();
+            sync::decrement(kernel->live_tasks);
+            kernel->wakeup_schedulers();
+        }
+    }
+    srv->free(dead_tasks_copy);
+
+    lock.lock();
 }
 
 /**
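The rewritten function follows a snapshot-then-release pattern: copy the shared list while holding the sched lock, drop the lock before taking any per-task lock, re-acquire it only for the brief removal phase, and drop it again before the derefs that may free tasks. A simplified sketch of the same pattern (hypothetical names, modern C++ for brevity; the runtime's actual types differ):

    #include <mutex>
    #include <vector>

    struct Task { std::mutex lock; /* ... */ };

    std::mutex sched_lock;         // stands in for rust_scheduler::lock
    std::vector<Task*> dead_tasks; // guarded by sched_lock

    void reap() {
        // Phase 1: snapshot the shared list under sched_lock; no task locks yet.
        std::unique_lock<std::mutex> sl(sched_lock);
        std::vector<Task*> copy = dead_tasks;
        sl.unlock();

        // Phase 2: take task locks with sched_lock released, so the
        // task-lock-before-sched-lock order used elsewhere is never violated.
        for (Task *t : copy) {
            std::lock_guard<std::mutex> tl(t->lock);
            // ... decide whether t is really reapable ...
        }

        // Phase 3: re-acquire sched_lock only for the brief removal step,
        // then release it again before any work that might free a task.
        sl.lock();
        // ... erase the reaped entries from dead_tasks ...
    }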
@@ -90,6 +90,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
 
 rust_task::~rust_task()
 {
+    I(sched, !sched->lock.lock_held_by_current_thread());
     DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
          name, (uintptr_t)this, ref_count);
 
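The single added line turns the new invariant into a checked assertion: ~rust_task runs from the final deref in reap_dead_tasks, and per the comment in the hunk above, freeing a task may want the sched lock again, so the destructor must never begin while the current thread holds it. A debug-only sketch of how such a lock-ownership check can be built (hypothetical wrapper, not the runtime's actual lock type):

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    // Records its owner so code like a destructor can assert the lock
    // is NOT held by the current thread (debug aid only).
    class checked_lock {
        std::mutex m;
        std::atomic<std::thread::id> owner;
    public:
        void lock()   { m.lock(); owner.store(std::this_thread::get_id()); }
        void unlock() { owner.store(std::thread::id()); m.unlock(); }
        bool held_by_current_thread() const {
            return owner.load() == std::this_thread::get_id();
        }
    };

    checked_lock sched_lock; // stands in for rust_scheduler::lock

    struct task {
        ~task() {
            // Mirrors I(sched, !sched->lock.lock_held_by_current_thread()):
            // destroying a task while holding the sched lock would reintroduce
            // the forbidden sched->lock-then-task->lock order.
            assert(!sched_lock.held_by_current_thread());
        }
    };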
@@ -1,7 +1,3 @@
-// xfail-stage1
-// xfail-stage2
-// xfail-stage3
-
 use std;
 import std::vec;
 import std::task;