From 9fa950ec53dc7428d2d4a20ba56a50c21d5606a5 Mon Sep 17 00:00:00 2001
From: Brian Anderson
Date: Sat, 4 Feb 2012 00:03:45 -0800
Subject: [PATCH] rt: Stop using atomic ops on rust_kernel::live_tasks

These ops are all done within spitting distance of a suitable lock, so
just protect it with the lock.
---
 src/rt/rust_kernel.cpp | 6 ++++--
 src/rt/rust_kernel.h   | 7 ++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/src/rt/rust_kernel.cpp b/src/rt/rust_kernel.cpp
index 367b76a9136..33191c2f553 100644
--- a/src/rt/rust_kernel.cpp
+++ b/src/rt/rust_kernel.cpp
@@ -84,25 +84,27 @@ rust_kernel::fail() {
 
 void
 rust_kernel::register_task(rust_task *task) {
+    int new_live_tasks;
     {
         scoped_lock with(task_lock);
         task->user.id = max_task_id++;
         task_table.put(task->user.id, task);
+        new_live_tasks = ++live_tasks;
     }
     K(srv, task->user.id != INTPTR_MAX, "Hit the maximum task id");
     KLOG_("Registered task %" PRIdPTR, task->user.id);
-    int new_live_tasks = sync::increment(live_tasks);
     KLOG_("Total outstanding tasks: %d", new_live_tasks);
 }
 
 void
 rust_kernel::release_task_id(rust_task_id id) {
     KLOG_("Releasing task %" PRIdPTR, id);
+    int new_live_tasks;
     {
         scoped_lock with(task_lock);
         task_table.remove(id);
+        new_live_tasks = --live_tasks;
     }
-    int new_live_tasks = sync::decrement(live_tasks);
     KLOG_("Total outstanding tasks: %d", new_live_tasks);
     if (new_live_tasks == 0) {
         // There are no more tasks and there never will be.
diff --git a/src/rt/rust_kernel.h b/src/rt/rust_kernel.h
index ff892aea303..917444b2bbd 100644
--- a/src/rt/rust_kernel.h
+++ b/src/rt/rust_kernel.h
@@ -22,12 +22,13 @@ public:
 private:
     rust_scheduler *sched;
 
+    // Protects live_tasks, max_task_id and task_table
+    lock_and_signal task_lock;
     // Tracks the number of tasks that are being managed by
     // schedulers. When this hits 0 we will tell all schedulers
     // to exit.
-    volatile int live_tasks;
-    // Protects max_task_id and task_table
-    lock_and_signal task_lock;
+    int live_tasks;
+
     // The next task id
     rust_task_id max_task_id;
     hash_map<rust_task_id, rust_task *> task_table;
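
Note (not part of the patch): a minimal sketch of the pattern this change applies. The counter stops being a volatile int bumped with atomic intrinsics (sync::increment / sync::decrement) and becomes a plain int that is only read or written while the existing lock is held, with the post-update value captured inside the critical section so the log statement outside it never re-reads shared state. The sketch uses std::mutex and a hypothetical Registry type in place of the runtime's lock_and_signal and scoped_lock; it illustrates the technique and is not the rust_kernel code.

// Hypothetical illustration of "protect the counter with the existing lock"
// rather than using atomic ops on it. Names here are made up for the sketch.
#include <cstdio>
#include <map>
#include <mutex>

struct Registry {
    std::mutex lock;                  // protects live_tasks, next_id and table
    int live_tasks = 0;               // plain int: only touched under `lock`
    int next_id = 0;
    std::map<int, const char*> table;

    int register_task(const char* name) {
        int id, new_live_tasks;
        {
            // All mutation of shared state happens inside one critical
            // section, so no atomic increment is needed on live_tasks.
            std::lock_guard<std::mutex> guard(lock);
            id = next_id++;
            table[id] = name;
            new_live_tasks = ++live_tasks;
        }
        // Logging uses the value captured while the lock was held.
        std::printf("registered %d, outstanding: %d\n", id, new_live_tasks);
        return id;
    }

    void release_task(int id) {
        int new_live_tasks;
        {
            std::lock_guard<std::mutex> guard(lock);
            table.erase(id);
            new_live_tasks = --live_tasks;
        }
        std::printf("released %d, outstanding: %d\n", id, new_live_tasks);
        if (new_live_tasks == 0) {
            std::printf("no more tasks; time to shut down\n");
        }
    }
};

int main() {
    Registry r;
    int a = r.register_task("a");
    int b = r.register_task("b");
    r.release_task(a);
    r.release_task(b);
    return 0;
}

The detail that matters is that new_live_tasks is assigned inside the critical section: each releaser sees the value produced by its own decrement, so exactly one caller observes zero, which is the property the kernel relies on to decide when to tell the schedulers to exit.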