Auto merge of #24447 - alexcrichton:audit-thread, r=aturon

Much of this code hasn't been updated in quite some time, so this commit performs a
small audit of the functionality:

* Implementation functions now centralize all functionality on a locally defined
  `Thread` type.
* The `detach` method has been removed in favor of a `Drop` implementation (see
  the sketch after this list). This notably fixes leaking thread handles on
  Windows.
* The `Thread` structure is now appropriately `Send` and `Sync`: derived
  automatically on Windows, and implemented manually on Unix.
* The unsafety of creating a thread has been pushed out to the correct API
  boundaries.
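
As a reduced illustration of the ownership story described above; this is a
sketch against the Unix pthread API via the `libc` crate, not the literal code
from the diff (the Windows version closes a `Handle` in `Drop` instead):

    use std::{mem, ptr};

    /// Illustrative owned handle to an OS thread.
    struct Thread {
        id: libc::pthread_t,
    }

    impl Thread {
        /// Joining consumes the handle; `mem::forget` keeps `drop` from
        /// detaching a thread that has already been joined.
        fn join(self) {
            unsafe {
                let ret = libc::pthread_join(self.id, ptr::null_mut());
                mem::forget(self);
                debug_assert_eq!(ret, 0);
            }
        }
    }

    impl Drop for Thread {
        /// An unjoined handle is detached rather than leaked.
        fn drop(&mut self) {
            let ret = unsafe { libc::pthread_detach(self.id) };
            debug_assert_eq!(ret, 0);
        }
    }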

Closes #24442
Merged by bors on 2015-04-22 18:48:31 +00:00 as commit 5c9636975c.
5 changed files with 299 additions and 296 deletions


@@ -10,22 +10,22 @@
use prelude::v1::*;
use usize;
use alloc::boxed::FnBox;
use libc;
use thunk::Thunk;
use sys_common::stack;
use sys::stack_overflow;
use sys_common::stack;
use usize;
// This is the starting point of rust os threads. The first thing we do
// is make sure that we don't trigger __morestack (also why this has a
// no_stack_check annotation), and then we extract the main function
// and invoke it.
#[no_stack_check]
pub fn start_thread(main: *mut libc::c_void) {
unsafe {
stack::record_os_managed_stack_bounds(0, usize::MAX);
let _handler = stack_overflow::Handler::new();
let main: Box<Thunk> = Box::from_raw(main as *mut Thunk);
main();
}
pub unsafe fn start_thread(main: *mut libc::c_void) {
// First ensure that we don't trigger __morestack (also why this has a
// no_stack_check annotation).
stack::record_os_managed_stack_bounds(0, usize::MAX);
// Next, set up our stack overflow handler which may get triggered if we run
// out of stack.
let _handler = stack_overflow::Handler::new();
// Finally, let's run some code.
Box::from_raw(main as *mut Box<FnBox()>)()
}
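
The `Box::from_raw(main as *mut Box<FnBox()>)()` step works because the
closure is boxed twice: `Box<FnBox()>` is a fat pointer, but a `Box` of that
box is thin and survives the round trip through C's `void*`. A standalone
sketch of the same round trip in modern Rust, with `Box<dyn FnOnce()>`
standing in for the since-removed `FnBox`:

    use std::ffi::c_void;

    fn main() {
        let cb: Box<dyn FnOnce()> = Box::new(|| println!("hello from the new thread"));
        // Double-box: the outer Box is a thin pointer, so it fits in *mut c_void.
        let arg = Box::into_raw(Box::new(cb)) as *mut c_void;

        // ...`arg` is what would be handed to pthread_create/CreateThread...

        // On the other side, reconstitute the box and call it. This must happen
        // exactly once: never calling it leaks, reconstituting twice double-frees.
        let cb: Box<Box<dyn FnOnce()>> =
            unsafe { Box::from_raw(arg as *mut Box<dyn FnOnce()>) };
        cb();
    }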


@@ -10,8 +10,9 @@
#![allow(dead_code)]
use core::prelude::*;
use prelude::v1::*;
use alloc::boxed::FnBox;
use cmp;
use ffi::CString;
use io;
@@ -20,13 +21,148 @@ use libc;
use mem;
use ptr;
use sys::os;
use thunk::Thunk;
use time::Duration;
use sys_common::stack::RED_ZONE;
use sys_common::thread::*;
pub type rust_thread = libc::pthread_t;
pub struct Thread {
id: libc::pthread_t,
}
// Some platforms may have pthread_t as a pointer in which case we still want
// a thread to be Send/Sync
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}
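
The manual impls above exist because `pthread_t` is a plain integer on some
platforms but a raw pointer on others, and a raw pointer field suppresses the
`Send`/`Sync` auto traits. A reduced illustration with a hypothetical `Handle`
type:

    // On platforms where pthread_t is a pointer, a struct holding it is
    // not Send/Sync automatically:
    struct Handle(*mut u8);

    // The value is only a token passed to pthread_join/pthread_detach and
    // is never dereferenced, so asserting thread-safety is sound.
    unsafe impl Send for Handle {}
    unsafe impl Sync for Handle {}

    fn assert_send_sync<T: Send + Sync>() {}

    fn main() {
        assert_send_sync::<Handle>(); // compiles only thanks to the impls above
    }
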
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
let mut native: libc::pthread_t = mem::zeroed();
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(pthread_attr_init(&mut attr), 0);
// Reserve room for the red zone, the runtime's stack of last resort.
let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr));
match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
0 => {}
n => {
assert_eq!(n, libc::EINVAL);
// EINVAL means |stack_size| is either too small or not a
// multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
let stack_size = (stack_size + page_size - 1) &
(-(page_size as isize - 1) as usize - 1);
let stack_size = stack_size as libc::size_t;
assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size), 0);
}
};
let ret = pthread_create(&mut native, &attr, thread_start,
&*p as *const _ as *mut _);
assert_eq!(pthread_attr_destroy(&mut attr), 0);
return if ret != 0 {
Err(io::Error::from_raw_os_error(ret))
} else {
mem::forget(p); // ownership passed to pthread_create
Ok(Thread { id: native })
};
#[no_stack_check]
extern fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
unsafe { start_thread(main); }
0 as *mut _
}
}
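
The mask in the EINVAL branch is the usual round-up-to-alignment trick: for a
power-of-two page size, `-(page_size as isize - 1) as usize - 1` equals
`!(page_size - 1)`. A quick check of the identity, assuming a 4 KiB page:

    fn round_up_to_page(stack_size: usize, page_size: usize) -> usize {
        // Same expression as in the diff; equivalent to
        // (stack_size + page_size - 1) & !(page_size - 1)
        (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1)
    }

    fn main() {
        let page_size = 4096;
        // 70_000 is not page-aligned; the next multiple of 4096 is 73_728.
        assert_eq!(round_up_to_page(70_000, page_size), 73_728);
        // Already-aligned sizes are left alone.
        assert_eq!(round_up_to_page(8_192, page_size), 8_192);
    }
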
pub fn yield_now() {
let ret = unsafe { sched_yield() };
debug_assert_eq!(ret, 0);
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn set_name(name: &str) {
// pthread wrapper only appeared in glibc 2.12, so we use syscall
// directly.
extern {
fn prctl(option: libc::c_int, arg2: libc::c_ulong,
arg3: libc::c_ulong, arg4: libc::c_ulong,
arg5: libc::c_ulong) -> libc::c_int;
}
const PR_SET_NAME: libc::c_int = 15;
let cname = CString::new(name).unwrap_or_else(|_| {
panic!("thread name may not contain interior null bytes")
});
unsafe {
prctl(PR_SET_NAME, cname.as_ptr() as libc::c_ulong, 0, 0, 0);
}
}
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
pub fn set_name(name: &str) {
extern {
fn pthread_set_name_np(tid: libc::pthread_t,
name: *const libc::c_char);
}
let cname = CString::new(name).unwrap();
unsafe {
pthread_set_name_np(pthread_self(), cname.as_ptr());
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub fn set_name(name: &str) {
extern {
fn pthread_setname_np(name: *const libc::c_char) -> libc::c_int;
}
let cname = CString::new(name).unwrap();
unsafe {
pthread_setname_np(cname.as_ptr());
}
}
pub fn sleep(dur: Duration) {
if dur < Duration::zero() {
return Thread::yield_now()
}
let seconds = dur.num_seconds();
let ns = dur - Duration::seconds(seconds);
let mut ts = libc::timespec {
tv_sec: seconds as libc::time_t,
tv_nsec: ns.num_nanoseconds().unwrap() as libc::c_long,
};
// If we're awoken with a signal then the return value will be -1 and
// nanosleep will fill in `ts` with the remaining time.
unsafe {
while libc::nanosleep(&ts, &mut ts) == -1 {
assert_eq!(os::errno(), libc::EINTR);
}
}
}
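
`sleep` splits the duration into whole seconds plus the sub-second remainder
in nanoseconds, the shape `timespec` wants. The diff uses the old
`time::Duration` API; for comparison, the same split with today's
`std::time::Duration`:

    use std::time::Duration;

    fn to_timespec_parts(dur: Duration) -> (u64, u32) {
        // Whole seconds, plus the sub-second remainder in nanoseconds:
        // exactly the (tv_sec, tv_nsec) pair that nanosleep expects.
        (dur.as_secs(), dur.subsec_nanos())
    }

    fn main() {
        let (secs, nanos) = to_timespec_parts(Duration::from_millis(1_500));
        assert_eq!((secs, nanos), (1, 500_000_000));
    }
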
pub fn join(self) {
unsafe {
let ret = pthread_join(self.id, ptr::null_mut());
mem::forget(self);
debug_assert_eq!(ret, 0);
}
}
}
impl Drop for Thread {
fn drop(&mut self) {
let ret = unsafe { pthread_detach(self.id) };
debug_assert_eq!(ret, 0);
}
}
#[cfg(all(not(target_os = "linux"),
not(target_os = "macos"),
@@ -183,128 +319,6 @@ pub mod guard {
}
}
pub unsafe fn create(stack: usize, p: Thunk) -> io::Result<rust_thread> {
let p = box p;
let mut native: libc::pthread_t = mem::zeroed();
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(pthread_attr_init(&mut attr), 0);
// Reserve room for the red zone, the runtime's stack of last resort.
let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as usize);
match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
0 => {}
n => {
assert_eq!(n, libc::EINVAL);
// EINVAL means |stack_size| is either too small or not a
// multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
let stack_size = (stack_size + page_size - 1) &
(-(page_size as isize - 1) as usize - 1);
assert_eq!(pthread_attr_setstacksize(&mut attr,
stack_size as libc::size_t), 0);
}
};
let ret = pthread_create(&mut native, &attr, thread_start,
&*p as *const _ as *mut _);
assert_eq!(pthread_attr_destroy(&mut attr), 0);
return if ret != 0 {
Err(io::Error::from_raw_os_error(ret))
} else {
mem::forget(p); // ownership passed to pthread_create
Ok(native)
};
#[no_stack_check]
extern fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
start_thread(main);
0 as *mut _
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub unsafe fn set_name(name: &str) {
// pthread wrapper only appeared in glibc 2.12, so we use syscall directly.
extern {
fn prctl(option: libc::c_int, arg2: libc::c_ulong, arg3: libc::c_ulong,
arg4: libc::c_ulong, arg5: libc::c_ulong) -> libc::c_int;
}
const PR_SET_NAME: libc::c_int = 15;
let cname = CString::new(name).unwrap_or_else(|_| {
panic!("thread name may not contain interior null bytes")
});
prctl(PR_SET_NAME, cname.as_ptr() as libc::c_ulong, 0, 0, 0);
}
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
pub unsafe fn set_name(name: &str) {
extern {
fn pthread_set_name_np(tid: libc::pthread_t, name: *const libc::c_char);
}
let cname = CString::new(name).unwrap();
pthread_set_name_np(pthread_self(), cname.as_ptr());
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub unsafe fn set_name(name: &str) {
extern {
fn pthread_setname_np(name: *const libc::c_char) -> libc::c_int;
}
let cname = CString::new(name).unwrap();
pthread_setname_np(cname.as_ptr());
}
pub unsafe fn join(native: rust_thread) {
assert_eq!(pthread_join(native, ptr::null_mut()), 0);
}
pub unsafe fn detach(native: rust_thread) {
assert_eq!(pthread_detach(native), 0);
}
pub unsafe fn yield_now() {
assert_eq!(sched_yield(), 0);
}
pub fn sleep(dur: Duration) {
unsafe {
if dur < Duration::zero() {
return yield_now()
}
let seconds = dur.num_seconds();
let ns = dur - Duration::seconds(seconds);
let mut ts = libc::timespec {
tv_sec: seconds as libc::time_t,
tv_nsec: ns.num_nanoseconds().unwrap() as libc::c_long,
};
// If we're awoken with a signal then the return value will be -1 and
// nanosleep will fill in `ts` with the remaining time.
while dosleep(&mut ts) == -1 {
assert_eq!(os::errno(), libc::EINTR);
}
}
#[cfg(target_os = "linux")]
unsafe fn dosleep(ts: *mut libc::timespec) -> libc::c_int {
extern {
fn clock_nanosleep(clock_id: libc::c_int, flags: libc::c_int,
request: *const libc::timespec,
remain: *mut libc::timespec) -> libc::c_int;
}
clock_nanosleep(libc::CLOCK_MONOTONIC, 0, ts, ts)
}
#[cfg(not(target_os = "linux"))]
unsafe fn dosleep(ts: *mut libc::timespec) -> libc::c_int {
libc::nanosleep(ts, ts)
}
}
// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
// storage. We need that information to avoid blowing up when a small stack
@@ -319,7 +333,7 @@ pub fn sleep(dur: Duration) {
// but that caused Debian to detect an unnecessarily strict versioned
// dependency on libc6 (#23628).
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
use dynamic_lib::DynamicLibrary;
use sync::{Once, ONCE_INIT};
@@ -337,16 +351,16 @@ fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
});
match unsafe { __pthread_get_minstack } {
None => PTHREAD_STACK_MIN,
Some(f) => unsafe { f(attr) },
None => PTHREAD_STACK_MIN as usize,
Some(f) => unsafe { f(attr) as usize },
}
}
// No point in looking up __pthread_get_minstack() on non-glibc
// platforms.
#[cfg(not(target_os = "linux"))]
fn min_stack_size(_: *const libc::pthread_attr_t) -> libc::size_t {
PTHREAD_STACK_MIN
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
PTHREAD_STACK_MIN as usize
}
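
The runtime lookup above keeps rustc's output from acquiring a strict
versioned libc6 dependency (#23628). A hedged sketch of the same
optional-symbol trick using a plain `dlsym` from the `libc` crate (the real
code goes through `DynamicLibrary` and a `Once`):

    use std::mem;

    type MinStackFn = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;

    unsafe fn min_stack(attr: *const libc::pthread_attr_t) -> usize {
        // RTLD_DEFAULT searches the images already loaded into the process,
        // so a missing symbol degrades gracefully instead of failing to link.
        let sym = libc::dlsym(libc::RTLD_DEFAULT,
                              b"__pthread_get_minstack\0".as_ptr() as *const _);
        if sym.is_null() {
            libc::PTHREAD_STACK_MIN as usize // non-glibc fallback
        } else {
            let f: MinStackFn = mem::transmute(sym);
            f(attr) as usize
        }
    }
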
extern {


@@ -471,6 +471,17 @@ extern "system" {
hWritePipe: libc::LPHANDLE,
lpPipeAttributes: libc::LPSECURITY_ATTRIBUTES,
nSize: libc::DWORD) -> libc::BOOL;
pub fn CreateThread(lpThreadAttributes: libc::LPSECURITY_ATTRIBUTES,
dwStackSize: libc::SIZE_T,
lpStartAddress: extern "system" fn(*mut libc::c_void)
-> libc::DWORD,
lpParameter: libc::LPVOID,
dwCreationFlags: libc::DWORD,
lpThreadId: libc::LPDWORD) -> libc::HANDLE;
pub fn WaitForSingleObject(hHandle: libc::HANDLE,
dwMilliseconds: libc::DWORD) -> libc::DWORD;
pub fn SwitchToThread() -> libc::BOOL;
pub fn Sleep(dwMilliseconds: libc::DWORD);
}
#[link(name = "userenv")]


@@ -10,102 +10,91 @@
use prelude::v1::*;
use alloc::boxed::FnBox;
use cmp;
use io;
use libc::{self, c_void};
use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
LPVOID, DWORD, LPDWORD, HANDLE};
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::stack::RED_ZONE;
use sys_common::thread::*;
use thunk::Thunk;
use time::Duration;
pub type rust_thread = HANDLE;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// For now, the only requirement is that it's big enough to hold the
// red zone. Round up to the next 64 kB because that's what the NT
// kernel does, might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
};
#[no_stack_check]
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
}
}
pub fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn join(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
if dur < Duration::zero() {
return Thread::yield_now()
}
let ms = dur.num_milliseconds();
// if we have a fractional number of milliseconds then add an extra
// millisecond to sleep for
let extra = dur - Duration::milliseconds(ms);
let ms = ms + if extra.is_zero() {0} else {1};
c::Sleep(ms as DWORD);
}
}
}
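
For reference, rounding a size up to the next 64 KiB boundary is the standard
`(size + align - 1) & !(align - 1)` pattern; a minimal check (illustrative,
not the exact constants used in `Thread::new` above):

    fn round_up_64k(size: usize) -> usize {
        const ALIGN: usize = 0x10000; // 64 KiB, the NT kernel's granularity
        (size + ALIGN - 1) & !(ALIGN - 1)
    }

    fn main() {
        assert_eq!(round_up_64k(20 * 1024), 64 * 1024); // 20 KiB red zone -> 64 KiB stack
        assert_eq!(round_up_64k(64 * 1024), 64 * 1024); // already aligned
        assert_eq!(round_up_64k(65 * 1024), 128 * 1024);
    }
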
pub mod guard {
pub unsafe fn main() -> usize { 0 }
pub unsafe fn current() -> usize { 0 }
pub unsafe fn init() {}
}
pub unsafe fn create(stack: usize, p: Thunk) -> io::Result<rust_thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// For now, the only requirement is that it's big enough to hold the
// red zone. Round up to the next 64 kB because that's what the NT
// kernel does, might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(ret)
};
#[no_stack_check]
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
start_thread(main);
0
}
}
pub unsafe fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub unsafe fn join(native: rust_thread) {
use libc::consts::os::extra::INFINITE;
WaitForSingleObject(native, INFINITE);
}
pub unsafe fn detach(native: rust_thread) {
assert!(libc::CloseHandle(native) != 0);
}
pub unsafe fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
SwitchToThread();
}
pub fn sleep(dur: Duration) {
unsafe {
if dur < Duration::zero() {
return yield_now()
}
let ms = dur.num_milliseconds();
// if we have a fractional number of milliseconds then add an extra
// millisecond to sleep for
let extra = dur - Duration::milliseconds(ms);
let ms = ms + if extra.is_zero() {0} else {1};
Sleep(ms as DWORD);
}
}
#[allow(non_snake_case)]
extern "system" {
fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
dwStackSize: SIZE_T,
lpStartAddress: extern "system" fn(*mut c_void) -> DWORD,
lpParameter: LPVOID,
dwCreationFlags: DWORD,
lpThreadId: LPDWORD) -> HANDLE;
fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
fn SwitchToThread() -> BOOL;
fn Sleep(dwMilliseconds: DWORD);
}


@@ -190,6 +190,7 @@
use prelude::v1::*;
use alloc::boxed::FnBox;
use any::Any;
use cell::UnsafeCell;
use fmt;
@@ -199,7 +200,6 @@ use rt::{self, unwind};
use sync::{Mutex, Condvar, Arc};
use sys::thread as imp;
use sys_common::{stack, thread_info};
use thunk::Thunk;
use time::Duration;
////////////////////////////////////////////////////////////////////////////////
@@ -276,7 +276,9 @@ impl Builder {
pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>> where
F: FnOnce() -> T, F: Send + 'static, T: Send + 'static
{
self.spawn_inner(Box::new(f)).map(|i| JoinHandle(i))
unsafe {
self.spawn_inner(Box::new(f)).map(JoinHandle)
}
}
/// Spawns a new child thread that must be joined within a given
@@ -299,12 +301,18 @@ impl Builder {
pub fn scoped<'a, T, F>(self, f: F) -> io::Result<JoinGuard<'a, T>> where
T: Send + 'a, F: FnOnce() -> T, F: Send + 'a
{
self.spawn_inner(Box::new(f)).map(|inner| {
JoinGuard { inner: inner, _marker: PhantomData }
})
unsafe {
self.spawn_inner(Box::new(f)).map(|inner| {
JoinGuard { inner: inner, _marker: PhantomData }
})
}
}
fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> io::Result<JoinInner<T>> {
// NB: this function is unsafe as the lifetime parameter of the code to run
// in the new thread is not tied into the return value, and the return
// value must not outlast that lifetime.
unsafe fn spawn_inner<'a, T: Send>(self, f: Box<FnBox() -> T + Send + 'a>)
-> io::Result<JoinInner<T>> {
let Builder { name, stack_size } = self;
let stack_size = stack_size.unwrap_or(rt::min_stack());
@@ -312,8 +320,8 @@ impl Builder {
let my_thread = Thread::new(name);
let their_thread = my_thread.clone();
let my_packet = Packet(Arc::new(UnsafeCell::new(None)));
let their_packet = Packet(my_packet.0.clone());
let my_packet = Arc::new(UnsafeCell::new(None));
let their_packet = my_packet.clone();
// Spawning a new OS thread guarantees that __morestack will never get
// triggered, but we must manually set up the actual stack bounds once
@@ -326,48 +334,27 @@ impl Builder {
let addr = &something_around_the_top_of_the_stack as *const i32;
let my_stack_top = addr as usize;
let my_stack_bottom = my_stack_top - stack_size + 1024;
unsafe {
if let Some(name) = their_thread.name() {
imp::set_name(name);
}
stack::record_os_managed_stack_bounds(my_stack_bottom,
my_stack_top);
thread_info::set(imp::guard::current(), their_thread);
}
stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
let mut output: Option<T> = None;
if let Some(name) = their_thread.name() {
imp::Thread::set_name(name);
}
thread_info::set(imp::guard::current(), their_thread);
let mut output = None;
let try_result = {
let ptr = &mut output;
// There are two primary reasons that general try/catch is
// unsafe. The first is that we do not support nested
// try/catch. The fact that this is happening in a newly-spawned
// thread suffices. The second is that unwinding while unwinding
// is not defined. We take care of that by having an
// 'unwinding' flag in the thread itself. For these reasons,
// this unsafety should be ok.
unsafe {
unwind::try(move || {
let f: Thunk<(), T> = f;
let v: T = f();
*ptr = Some(v)
})
}
unwind::try(move || *ptr = Some(f()))
};
unsafe {
*their_packet.0.get() = Some(match (output, try_result) {
(Some(data), Ok(_)) => Ok(data),
(None, Err(cause)) => Err(cause),
_ => unreachable!()
});
}
*their_packet.get() = Some(try_result.map(|()| {
output.unwrap()
}));
};
Ok(JoinInner {
native: try!(unsafe { imp::create(stack_size, Box::new(main)) }),
native: Some(try!(imp::Thread::new(stack_size, Box::new(main)))),
thread: my_thread,
packet: my_packet,
joined: false,
packet: Packet(my_packet),
})
}
}
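
The rewritten `main` closure collapses the old match over
`(Option<T>, Result<(), _>)` into a single expression: if `unwind::try`
returned `Ok(())`, the closure necessarily ran to completion and stored its
value, so the `unwrap` cannot fail. The same shape, reduced to a hypothetical
standalone function:

    use std::any::Any;

    fn pack_result<T>(try_result: Result<(), Box<dyn Any + Send>>,
                      output: Option<T>) -> Result<T, Box<dyn Any + Send>> {
        // Ok(()) implies `output` was filled in before the closure returned;
        // Err(cause) means it unwound before the store could happen.
        try_result.map(|()| output.unwrap())
    }
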
@@ -427,7 +414,7 @@ pub fn current() -> Thread {
/// Cooperatively gives up a timeslice to the OS scheduler.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn yield_now() {
unsafe { imp::yield_now() }
imp::Thread::yield_now()
}
/// Determines whether the current thread is unwinding because of panic.
@@ -494,7 +481,7 @@ pub fn catch_panic<F, R>(f: F) -> Result<R>
/// spurious wakeup.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sleep_ms(ms: u32) {
imp::sleep(Duration::milliseconds(ms as i64))
imp::Thread::sleep(Duration::milliseconds(ms as i64))
}
/// Blocks unless or until the current thread's token is made available (may wake spuriously).
@@ -548,8 +535,6 @@ struct Inner {
cvar: Condvar,
}
unsafe impl Sync for Inner {}
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
/// A handle to a thread.
@@ -610,24 +595,33 @@ impl thread_info::NewThread for Thread {
#[stable(feature = "rust1", since = "1.0.0")]
pub type Result<T> = ::result::Result<T, Box<Any + Send + 'static>>;
// This packet is used to communicate the return value between the child thread
// and the parent thread. Memory is shared through the `Arc` within and there's
// no need for a mutex here because synchronization happens with `join()` (the
// parent thread never reads this packet until the child has exited).
//
// This packet itself is then stored into a `JoinInner` which in turns is placed
// in `JoinHandle` and `JoinGuard`. Due to the usage of `UnsafeCell` we need to
// manually worry about impls like Send and Sync. The type `T` should
// already always be Send (otherwise the thread could not have been created) and
// this type is inherently Sync because no methods take &self. Regardless,
// however, we add inheriting impls for Send/Sync to this type to ensure it's
// Send/Sync and that future modifications will still appropriately classify it.
struct Packet<T>(Arc<UnsafeCell<Option<Result<T>>>>);
unsafe impl<T:Send> Send for Packet<T> {}
unsafe impl<T> Sync for Packet<T> {}
unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T: Sync> Sync for Packet<T> {}
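
The contract described in the comment above, reduced to a runnable sketch on
top of public `std::thread` (illustrative only; `join()` provides the
happens-before edge that makes the unsynchronized cell safe to read):

    use std::cell::UnsafeCell;
    use std::sync::Arc;
    use std::thread;

    struct Packet<T>(Arc<UnsafeCell<Option<T>>>);
    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T: Sync> Sync for Packet<T> {}

    fn main() {
        let parent = Packet(Arc::new(UnsafeCell::new(None)));
        let child = Packet(parent.0.clone());
        let handle = thread::spawn(move || {
            // Only the child writes, and only before it exits.
            unsafe { *child.0.get() = Some(42); }
        });
        // The parent never looks at the cell until the child is gone.
        handle.join().unwrap();
        assert_eq!(unsafe { (*parent.0.get()).take() }, Some(42));
    }
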
/// Inner representation for JoinHandle and JoinGuard
struct JoinInner<T> {
native: imp::rust_thread,
native: Option<imp::Thread>,
thread: Thread,
packet: Packet<T>,
joined: bool,
}
impl<T> JoinInner<T> {
fn join(&mut self) -> Result<T> {
assert!(!self.joined);
unsafe { imp::join(self.native) };
self.joined = true;
self.native.take().unwrap().join();
unsafe {
(*self.packet.0.get()).take().unwrap()
}
@@ -662,16 +656,6 @@ impl<T> JoinHandle<T> {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[unsafe_destructor]
impl<T> Drop for JoinHandle<T> {
fn drop(&mut self) {
if !self.0.joined {
unsafe { imp::detach(self.0.native) }
}
}
}
/// An RAII-style guard that will block until thread termination when dropped.
///
/// The type `T` is the return type for the thread's main function.
@@ -720,14 +704,19 @@ impl<'a, T: Send + 'a> JoinGuard<'a, T> {
reason = "memory unsafe if destructor is avoided, see #24292")]
impl<'a, T: Send + 'a> Drop for JoinGuard<'a, T> {
fn drop(&mut self) {
if !self.inner.joined {
if self.inner.join().is_err() {
panic!("child thread {:?} panicked", self.thread());
}
if self.inner.native.is_some() && self.inner.join().is_err() {
panic!("child thread {:?} panicked", self.thread());
}
}
}
fn _assert_sync_and_send() {
fn _assert_both<T: Send + Sync>() {}
_assert_both::<JoinHandle<()>>();
_assert_both::<JoinGuard<()>>();
_assert_both::<Thread>();
}
////////////////////////////////////////////////////////////////////////////////
// Tests
////////////////////////////////////////////////////////////////////////////////