rt: Use rust_task_thread's C-stack pool for native calls

This commit is contained in:
Brian Anderson 2012-02-09 01:13:32 -08:00
parent bfb80064d2
commit 79b1563abb
6 changed files with 99 additions and 31 deletions

View file

@ -29,8 +29,6 @@ struct registers_t {
uint32_t eip;
} __attribute__((aligned(16)));
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
class context {
public:
registers_t regs;
@ -41,10 +39,6 @@ public:
void swap(context &out);
void call(void *f, void *arg, void *sp);
void call_and_change_stacks(void *args, void *fn_ptr) {
__morestack(args, fn_ptr, regs.esp);
}
};
#endif

View file

@ -28,8 +28,6 @@ struct registers_t {
uint64_t data[RUSTRT_MAX];
} __attribute__((aligned(16)));
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
class context {
public:
registers_t regs;
@ -40,10 +38,6 @@ public:
void swap(context &out);
void call(void *f, void *arg, void *sp);
void call_and_change_stacks(void *args, void *fn_ptr) {
__morestack(args, fn_ptr, regs.data[RUSTRT_RSP]);
}
};
#endif

View file

@ -88,7 +88,9 @@ rust_task::rust_task(rust_task_thread *thread, rust_task_list *state,
propagate_failure(true),
dynastack(this),
cc_counter(0),
total_stack_sz(0)
total_stack_sz(0),
c_stack(NULL),
next_c_sp(0)
{
LOGPTR(thread, "new task", (uintptr_t)this);
DLOG(thread, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
@ -166,7 +168,6 @@ cleanup_task(cleanup_args *args) {
}
// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
{
rust_task *task = a->task;
@ -180,8 +181,15 @@ void task_start_wrapper(spawn_args *a)
A(task->thread, ex == task,
"Expected this task to be thrown for unwinding");
threw_exception = true;
if (task->c_stack) {
task->return_c_stack();
}
}
// We should have returned any C stack by now
I(task->thread, task->c_stack == NULL);
rust_opaque_box* env = a->envptr;
if(env) {
// free the environment (which should be a unique closure).
@ -722,10 +730,35 @@ rust_task::config_notify(chan_handle chan) {
notify_chan = chan;
}
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
// Runs fn_ptr(args) on a C stack borrowed from the scheduler thread.
// NOTE(review): this span is a unified diff with the +/- markers stripped;
// the thread->c_context.call_and_change_stacks(...) line below appears to be
// the REMOVED pre-change implementation interleaved with the new code —
// confirm against the original commit before relying on this text.
void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
// Must be entered while running on the Rust stack.
I(thread, on_rust_stack());
thread->c_context.call_and_change_stacks(args, fn_ptr);
bool borrowed_a_c_stack = false;
// Lazily borrow a C stack from the thread's pool on first use, and point
// next_c_sp at its (aligned-down) top so __morestack can switch to it.
if (c_stack == NULL) {
c_stack = thread->borrow_c_stack();
next_c_sp = align_down(c_stack->end);
borrowed_a_c_stack = true;
}
// __morestack switches to next_c_sp, calls fn_ptr(args), then switches back.
__morestack(args, fn_ptr, next_c_sp);
// Note that we may not actually get here if we threw an exception,
// in which case we will return the c stack when the exception is caught.
if (borrowed_a_c_stack) {
return_c_stack();
}
}
void
rust_task::return_c_stack() {
I(thread, on_rust_stack());
I(thread, c_stack != NULL);
thread->return_c_stack(c_stack);
c_stack = NULL;
next_c_sp = 0;
}
//

View file

@ -36,6 +36,8 @@ typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1
struct spawn_args;
// std::lib::task::task_notification
//
// since it's currently a unary tag, we only add the fields.
@ -104,6 +106,11 @@ rust_task : public kernel_owned<rust_task>, rust_cond
size_t total_stack_sz;
private:
// The stack used for running C code, borrowed from the scheduler thread
stk_seg *c_stack;
uintptr_t next_c_sp;
// Called when the atomic refcount reaches zero
void delete_this();
@ -112,6 +119,10 @@ private:
void free_stack(stk_seg *stk);
size_t get_next_stack_size(size_t min, size_t current, size_t requested);
void return_c_stack();
friend void task_start_wrapper(spawn_args *a);
public:
// Only a pointer to 'name' is kept, so it must live as long as this task.

View file

@ -61,10 +61,6 @@ rust_task_thread::~rust_task_thread() {
#ifndef __WIN32__
pthread_attr_destroy(&attr);
#endif
if (cached_c_stack) {
destroy_stack(kernel, cached_c_stack);
}
}
void
@ -72,7 +68,9 @@ rust_task_thread::activate(rust_task *task) {
task->ctx.next = &c_context;
DLOG(this, task, "descheduling...");
lock.unlock();
prepare_c_stack();
task->ctx.swap(c_context);
unprepare_c_stack();
lock.lock();
DLOG(this, task, "task has returned");
}
@ -287,6 +285,13 @@ rust_task_thread::start_main_loop() {
DLOG(this, dom, "finished main-loop %d", id);
lock.unlock();
I(this, !extra_c_stack);
if (cached_c_stack) {
unconfig_valgrind_stack(cached_c_stack);
destroy_stack(kernel, cached_c_stack);
cached_c_stack = NULL;
}
}
rust_crate_cache *
@ -374,24 +379,51 @@ rust_task_thread::exit() {
lock.signal();
}
stk_seg *
rust_task_thread::borrow_c_stack() {
if (cached_c_stack) {
stk_seg *your_stack = cached_c_stack;
cached_c_stack = NULL;
return your_stack;
} else {
return create_stack(kernel, C_STACK_SIZE);
// Before activating each task, make sure we have a C stack available.
// It needs to be allocated ahead of time (while we're on our own
// stack), because once we're on the Rust stack we won't have enough
// room to do the allocation
void
rust_task_thread::prepare_c_stack() {
I(this, !extra_c_stack);
if (!cached_c_stack) {
cached_c_stack = create_stack(kernel, C_STACK_SIZE);
}
}
void
rust_task_thread::return_c_stack(stk_seg *stack) {
if (cached_c_stack) {
destroy_stack(kernel, stack);
rust_task_thread::unprepare_c_stack() {
if (extra_c_stack) {
destroy_stack(kernel, extra_c_stack);
extra_c_stack = NULL;
}
}
// Lend a C stack to the calling task, preferring the spare (extra) stack
// when one is waiting, otherwise handing out the cached one.
// NB: Runs on the Rust stack
stk_seg *
rust_task_thread::borrow_c_stack() {
    // prepare_c_stack() must have run before any borrow.
    I(this, cached_c_stack);
    stk_seg **slot = extra_c_stack ? &extra_c_stack : &cached_c_stack;
    stk_seg *lent = *slot;
    *slot = NULL;
    config_valgrind_stack(lent);
    return lent;
}
// Take a C stack back from a task. The primary cache slot is refilled
// first; if it is already occupied the stack parks in the extra slot
// (which must be empty — unprepare_c_stack() drains it between tasks).
// NB: Runs on the Rust stack
void
rust_task_thread::return_c_stack(stk_seg *stack) {
    I(this, !extra_c_stack);
    unconfig_valgrind_stack(stack);
    stk_seg **slot = cached_c_stack ? &extra_c_stack : &cached_c_stack;
    *slot = stack;
}

View file

@ -95,6 +95,10 @@ struct rust_task_thread : public kernel_owned<rust_task_thread>,
private:
stk_seg *cached_c_stack;
stk_seg *extra_c_stack;
void prepare_c_stack();
void unprepare_c_stack();
public: