diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h
index 0a1b02037a2..7594e677bb0 100644
--- a/src/rt/rust_task.h
+++ b/src/rt/rust_task.h
@@ -205,6 +205,7 @@ public:
 
     void call_on_c_stack(void *args, void *fn_ptr);
     void call_on_rust_stack(void *args, void *fn_ptr);
+    bool have_c_stack() { return c_stack != NULL; }
 };
 
 // This stuff is on the stack-switching fast path
diff --git a/src/rt/rust_task_thread.cpp b/src/rt/rust_task_thread.cpp
index fbcd164707f..dde16ad70bd 100644
--- a/src/rt/rust_task_thread.cpp
+++ b/src/rt/rust_task_thread.cpp
@@ -70,7 +70,7 @@ rust_task_thread::activate(rust_task *task) {
     task->ctx.next = &c_context;
     DLOG(this, task, "descheduling...");
     lock.unlock();
-    prepare_c_stack();
+    prepare_c_stack(task);
     task->ctx.swap(c_context);
     unprepare_c_stack();
     lock.lock();
@@ -367,9 +367,9 @@ rust_task_thread::exit() {
 // stack), because once we're on the Rust stack we won't have enough
 // room to do the allocation
 void
-rust_task_thread::prepare_c_stack() {
+rust_task_thread::prepare_c_stack(rust_task *task) {
     I(this, !extra_c_stack);
-    if (!cached_c_stack) {
+    if (!cached_c_stack && !task->have_c_stack()) {
         cached_c_stack = create_stack(kernel, C_STACK_SIZE);
         prepare_valgrind_stack(cached_c_stack);
     }
diff --git a/src/rt/rust_task_thread.h b/src/rt/rust_task_thread.h
index b1a56dfa68c..58ae8aa12f5 100644
--- a/src/rt/rust_task_thread.h
+++ b/src/rt/rust_task_thread.h
@@ -98,7 +98,7 @@ private:
     stk_seg *cached_c_stack;
     stk_seg *extra_c_stack;
 
-    void prepare_c_stack();
+    void prepare_c_stack(rust_task *task);
     void unprepare_c_stack();
 
 public:
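
Note: for context, here is a minimal standalone C++ sketch of the behavior
this patch introduces. The types below are simplified, hypothetical
stand-ins for the runtime's stk_seg, rust_task, and rust_task_thread, not
the real definitions: create_stack(kernel, C_STACK_SIZE) is reduced to an
argument-less helper, and the runtime's I(...) invariant macro is replaced
with assert.

    #include <cassert>

    // Simplified stand-in for the runtime's stack segment type.
    struct stk_seg {};

    // Simplified stand-in for rust_task; only the field and accessor
    // relevant to this patch are modeled.
    struct rust_task {
        stk_seg *c_stack = nullptr;
        // Mirrors the accessor added to rust_task.h above.
        bool have_c_stack() { return c_stack != nullptr; }
    };

    struct rust_task_thread {
        stk_seg *cached_c_stack = nullptr;
        stk_seg *extra_c_stack = nullptr;

        // Placeholder for the real create_stack(kernel, C_STACK_SIZE).
        stk_seg *create_stack() { return new stk_seg(); }

        // After this patch, a cached C stack is only allocated when
        // neither the thread nor the task already has one available.
        void prepare_c_stack(rust_task *task) {
            assert(!extra_c_stack);
            if (!cached_c_stack && !task->have_c_stack()) {
                cached_c_stack = create_stack();
            }
        }
    };

    int main() {
        rust_task_thread thread;
        rust_task task;

        // Task resumed while pinned to a C stack (e.g. descheduled
        // mid-native-call): no redundant allocation happens.
        stk_seg existing;
        task.c_stack = &existing;
        thread.prepare_c_stack(&task);
        assert(thread.cached_c_stack == nullptr);

        // Fresh task with no C stack: one is allocated and cached.
        task.c_stack = nullptr;
        thread.prepare_c_stack(&task);
        assert(thread.cached_c_stack != nullptr);
        return 0;
    }

The point of threading the task through prepare_c_stack() is visible in
the first assertion: when activate() resumes a task that already owns a C
stack, the scheduler previously allocated a cached C stack it would never
switch to; with the have_c_stack() check that allocation is skipped.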