diff --git a/src/rt/rust_kernel.cpp b/src/rt/rust_kernel.cpp
index 7b1adf3ef4c..331aaf85a94 100644
--- a/src/rt/rust_kernel.cpp
+++ b/src/rt/rust_kernel.cpp
@@ -162,7 +162,7 @@ rust_kernel::wait_for_schedulers()
         rust_scheduler *sched = iter->second;
         sched_table.erase(iter);
         sched->join_task_threads();
-        delete sched;
+        sched->deref();
         if (sched_table.size() == 1) {
             KLOG_("Allowing osmain scheduler to exit");
             // It's only the osmain scheduler left. Tell it to exit
@@ -197,23 +197,21 @@ rust_kernel::fail() {
 #if defined(__WIN32__)
     exit(rval);
 #endif
-    // Copy the list of schedulers so that we don't hold the lock while
-    // running kill_all_tasks.
     // I think this only needs to be done by one task ever; as it is,
     // multiple tasks invoking kill_all might get here. Currently libcore
     // ensures only one task will ever invoke it, but this would really be
     // fine either way, so I'm leaving it as it is. -- bblum
-    // FIXME (#2671): There's a lot that happens under kill_all_tasks,
-    // and I don't know that holding sched_lock here is ok, but we need
-    // to hold the sched lock to prevent the scheduler from being
-    // destroyed while we are using it. Probably we need to make
-    // rust_scheduler atomicly reference counted.
+
+    // Copy the list of schedulers so that we don't hold the lock while
+    // running kill_all_tasks. Refcount to ensure they stay alive.
     std::vector<rust_scheduler*> scheds;
     {
         scoped_lock with(sched_lock);
+        // All schedulers created after this flag is set will be doomed.
         killed = true;
         for (sched_map::iterator iter = sched_table.begin();
              iter != sched_table.end(); iter++) {
+            iter->second->ref();
             scheds.push_back(iter->second);
         }
     }
@@ -221,6 +219,7 @@ rust_kernel::fail() {
     for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
          iter != scheds.end(); iter++) {
         (*iter)->kill_all_tasks();
+        (*iter)->deref();
     }
 }
diff --git a/src/rt/rust_scheduler.cpp b/src/rt/rust_scheduler.cpp
index dc662b009ac..5dd1a261c0e 100644
--- a/src/rt/rust_scheduler.cpp
+++ b/src/rt/rust_scheduler.cpp
@@ -11,6 +11,7 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel,
                                bool allow_exit,
                                bool killed,
                                rust_sched_launcher_factory *launchfac) :
+    ref_count(1),
     kernel(kernel),
     live_threads(num_threads),
     live_tasks(0),
@@ -22,8 +23,9 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel,
     create_task_threads(launchfac, killed);
 }
 
-rust_scheduler::~rust_scheduler() {
+void rust_scheduler::delete_this() {
     destroy_task_threads();
+    delete this;
 }
 
 rust_sched_launcher *
diff --git a/src/rt/rust_scheduler.h b/src/rt/rust_scheduler.h
index 699354f6d78..767ecaf7d1e 100644
--- a/src/rt/rust_scheduler.h
+++ b/src/rt/rust_scheduler.h
@@ -11,11 +11,13 @@
 #include "rust_globals.h"
 #include "util/array_list.h"
 #include "rust_kernel.h"
+#include "rust_refcount.h"
 
 class rust_sched_launcher;
 class rust_sched_launcher_factory;
 
 class rust_scheduler : public kernel_owned<rust_scheduler> {
+    RUST_ATOMIC_REFCOUNT();
     // FIXME (#2693): Make these private
 public:
     rust_kernel *kernel;
@@ -45,11 +47,13 @@ private:
 
     void exit();
 
+    // Called when refcount reaches zero
+    void delete_this();
+
 public:
     rust_scheduler(rust_kernel *kernel, size_t num_threads,
                    rust_sched_id id, bool allow_exit, bool killed,
                    rust_sched_launcher_factory *launchfac);
-    ~rust_scheduler();
 
     void start_task_threads();
     void join_task_threads();
diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h
index e9538a5abd5..2db882d11e3 100644
--- a/src/rt/rust_task.h
+++ b/src/rt/rust_task.h
@@ -122,8 +122,6 @@ rust_task_fail(rust_task *task,
 struct
 rust_task : public kernel_owned<rust_task>
 {
-    // FIXME(#1789) Refcounting no longer seems needed now that tasks don't
-    // ref their parents. I'll leave it in just in case... -- bblum
     RUST_ATOMIC_REFCOUNT();
 
     rust_task_id id;