Use a single ReentrantMutex implementation on all platforms.

Mara Bos 2022-04-14 11:11:41 +02:00
parent 07bb916d44
commit 4212de63ab
12 changed files with 91 additions and 540 deletions


@@ -1,6 +1,5 @@
use crate::cell::UnsafeCell;
use crate::collections::VecDeque;
use crate::ffi::c_void;
use crate::hint;
use crate::ops::{Deref, DerefMut, Drop};
use crate::ptr;
@@ -220,38 +219,3 @@ impl Mutex {
#[inline]
pub unsafe fn destroy(&self) {}
}
pub struct ReentrantMutex {
inner: *const c_void,
}
impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: ptr::null() }
}
#[inline]
pub unsafe fn init(&self) {
let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
}
#[inline]
pub unsafe fn lock(&self) {
let _ = abi::recmutex_lock(self.inner);
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
true
}
#[inline]
pub unsafe fn unlock(&self) {
let _ = abi::recmutex_unlock(self.inner);
}
#[inline]
pub unsafe fn destroy(&self) {
let _ = abi::recmutex_destroy(self.inner);
}
}


@@ -5,7 +5,6 @@ use super::{
error::{expect_success, expect_success_aborting, fail, ItronError},
spin::SpinIdOnceCell,
};
use crate::cell::UnsafeCell;
pub struct Mutex {
/// The ID of the underlying mutex object
@@ -89,95 +88,3 @@ impl Drop for MutexGuard<'_> {
unsafe { self.0.unlock() };
}
}
pub struct ReentrantMutex {
/// The ID of the underlying mutex object
mtx: abi::ID,
/// The lock count.
count: UnsafeCell<usize>,
}
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { mtx: 0, count: UnsafeCell::new(0) }
}
pub unsafe fn init(&mut self) {
self.mtx = expect_success(
unsafe {
abi::acre_mtx(&abi::T_CMTX {
// Priority inheritance mutex
mtxatr: abi::TA_INHERIT,
// Unused
ceilpri: 0,
})
},
&"acre_mtx",
);
}
pub unsafe fn lock(&self) {
match unsafe { abi::loc_mtx(self.mtx) } {
abi::E_OBJ => {
// Recursive lock
unsafe {
let count = &mut *self.count.get();
if let Some(new_count) = count.checked_add(1) {
*count = new_count;
} else {
// counter overflow
rtabort!("lock count overflow");
}
}
}
er => {
expect_success(er, &"loc_mtx");
}
}
}
pub unsafe fn unlock(&self) {
unsafe {
let count = &mut *self.count.get();
if *count > 0 {
*count -= 1;
return;
}
}
expect_success_aborting(unsafe { abi::unl_mtx(self.mtx) }, &"unl_mtx");
}
pub unsafe fn try_lock(&self) -> bool {
let er = unsafe { abi::ploc_mtx(self.mtx) };
if er == abi::E_OBJ {
// Recursive lock
unsafe {
let count = &mut *self.count.get();
if let Some(new_count) = count.checked_add(1) {
*count = new_count;
} else {
// counter overflow
rtabort!("lock count overflow");
}
}
true
} else if er == abi::E_TMOUT {
// Locked by another thread
false
} else {
expect_success(er, &"ploc_mtx");
// Top-level lock by the current thread
true
}
}
pub unsafe fn destroy(&self) {
expect_success_aborting(unsafe { abi::del_mtx(self.mtx) }, &"del_mtx");
}
}


@@ -1,8 +1,4 @@
use fortanix_sgx_abi::Tcs;
use super::abi::thread;
use super::waitqueue::{try_lock_or_false, NotifiedTcs, SpinMutex, WaitQueue, WaitVariable};
use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
pub struct Mutex {
inner: SpinMutex<WaitVariable<bool>>,
@@ -60,84 +56,3 @@ impl Mutex {
#[inline]
pub unsafe fn destroy(&self) {}
}
struct ReentrantLock {
owner: Option<Tcs>,
count: usize,
}
pub struct ReentrantMutex {
inner: SpinMutex<WaitVariable<ReentrantLock>>,
}
impl ReentrantMutex {
pub const fn uninitialized() -> ReentrantMutex {
ReentrantMutex {
inner: SpinMutex::new(WaitVariable::new(ReentrantLock { owner: None, count: 0 })),
}
}
#[inline]
pub unsafe fn init(&self) {}
#[inline]
pub unsafe fn lock(&self) {
let mut guard = self.inner.lock();
match guard.lock_var().owner {
Some(tcs) if tcs != thread::current() => {
// Another thread has the lock, wait
WaitQueue::wait(guard, || {});
// Another thread has passed the lock to us
}
_ => {
// We are just now obtaining the lock
guard.lock_var_mut().owner = Some(thread::current());
guard.lock_var_mut().count += 1;
}
}
}
#[inline]
pub unsafe fn unlock(&self) {
let mut guard = self.inner.lock();
if guard.lock_var().count > 1 {
guard.lock_var_mut().count -= 1;
} else {
match WaitQueue::notify_one(guard) {
Err(mut guard) => {
// No other waiters, unlock
guard.lock_var_mut().count = 0;
guard.lock_var_mut().owner = None;
}
Ok(mut guard) => {
// There was a thread waiting, just pass the lock
if let NotifiedTcs::Single(tcs) = guard.notified_tcs() {
guard.lock_var_mut().owner = Some(tcs)
} else {
unreachable!() // called notify_one
}
}
}
}
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
let mut guard = try_lock_or_false!(self.inner);
match guard.lock_var().owner {
Some(tcs) if tcs != thread::current() => {
// Another thread has the lock
false
}
_ => {
// We are just now obtaining the lock
guard.lock_var_mut().owner = Some(thread::current());
guard.lock_var_mut().count += 1;
true
}
}
}
#[inline]
pub unsafe fn destroy(&self) {}
}


@@ -1,6 +1,5 @@
use crate::cell::UnsafeCell;
use crate::sync::atomic::{
AtomicU32, AtomicUsize,
AtomicU32,
Ordering::{Acquire, Relaxed, Release},
};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
@@ -163,98 +162,3 @@ impl Condvar {
r
}
}
/// A reentrant mutex. Used by stdout().lock() and friends.
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex {
mutex: Mutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
}
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
#[inline]
pub const unsafe fn uninitialized() -> Self {
Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) }
}
#[inline]
pub unsafe fn init(&self) {}
#[inline]
pub unsafe fn destroy(&self) {}
pub unsafe fn try_lock(&self) -> bool {
let this_thread = current_thread_unique_ptr();
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
true
} else if self.mutex.try_lock() {
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
true
} else {
false
}
}
pub unsafe fn lock(&self) {
let this_thread = current_thread_unique_ptr();
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
self.mutex.lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
}
}
unsafe fn increment_lock_count(&self) {
*self.lock_count.get() = (*self.lock_count.get())
.checked_add(1)
.expect("lock count overflow in reentrant mutex");
}
pub unsafe fn unlock(&self) {
*self.lock_count.get() -= 1;
if *self.lock_count.get() == 0 {
self.owner.store(0, Relaxed);
self.mutex.unlock();
}
}
}
/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub fn current_thread_unique_ptr() -> usize {
// Use a non-drop type to make sure it's still available during thread destruction.
thread_local! { static X: u8 = const { 0 } }
X.with(|x| <*const _>::addr(x))
}


@@ -5,15 +5,13 @@ cfg_if::cfg_if! {
))] {
mod futex;
mod futex_rwlock;
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar, ReentrantMutex};
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar};
pub use futex_rwlock::{RwLock, MovableRwLock};
} else {
mod pthread_mutex;
mod pthread_remutex;
mod pthread_rwlock;
mod pthread_condvar;
pub use pthread_mutex::{Mutex, MovableMutex};
pub use pthread_remutex::ReentrantMutex;
pub use pthread_rwlock::{RwLock, MovableRwLock};
pub use pthread_condvar::{Condvar, MovableCondvar};
}


@@ -1,46 +0,0 @@
use super::pthread_mutex::PthreadMutexAttr;
use crate::cell::UnsafeCell;
use crate::mem::MaybeUninit;
use crate::sys::cvt_nz;
pub struct ReentrantMutex {
inner: UnsafeCell<libc::pthread_mutex_t>,
}
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
pub unsafe fn init(&self) {
let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
let attr = PthreadMutexAttr(&mut attr);
cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE))
.unwrap();
cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
}
pub unsafe fn lock(&self) {
let result = libc::pthread_mutex_lock(self.inner.get());
debug_assert_eq!(result, 0);
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
libc::pthread_mutex_trylock(self.inner.get()) == 0
}
pub unsafe fn unlock(&self) {
let result = libc::pthread_mutex_unlock(self.inner.get());
debug_assert_eq!(result, 0);
}
pub unsafe fn destroy(&self) {
let result = libc::pthread_mutex_destroy(self.inner.get());
debug_assert_eq!(result, 0);
}
}


@@ -36,26 +36,3 @@ impl Mutex {
#[inline]
pub unsafe fn destroy(&self) {}
}
// All empty stubs because this platform does not yet support threads, so lock
// acquisition always succeeds.
pub struct ReentrantMutex {}
impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex {}
}
pub unsafe fn init(&self) {}
pub unsafe fn lock(&self) {}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
true
}
pub unsafe fn unlock(&self) {}
pub unsafe fn destroy(&self) {}
}


@@ -1,8 +1,6 @@
use crate::arch::wasm32;
use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
use crate::sys::thread;
use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
pub struct Mutex {
locked: AtomicUsize,
@@ -64,93 +62,3 @@ impl Mutex {
self.locked.as_mut_ptr() as *mut i32
}
}
pub struct ReentrantMutex {
owner: AtomicU32,
recursions: UnsafeCell<u32>,
}
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
// Reentrant mutexes are similarly implemented to mutexes above except that
// instead of "1" meaning unlocked we use the id of a thread to represent
// whether it has locked a mutex. That way we have an atomic counter which
// always holds the id of the thread that currently holds the lock (or 0 if the
// lock is unlocked).
//
// Once a thread acquires a lock recursively, which it detects by looking at
// the value that's already there, it will update a local `recursions` counter
// in a nonatomic fashion (as we hold the lock). The lock is then fully
// released when this recursion counter reaches 0.
impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
}
pub unsafe fn init(&self) {
// nothing to do...
}
pub unsafe fn lock(&self) {
let me = thread::my_id();
while let Err(owner) = self._try_lock(me) {
// SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid i32.
let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
debug_assert!(val == 0 || val == 1);
}
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
unsafe { self._try_lock(thread::my_id()).is_ok() }
}
#[inline]
unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
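// Presumably the id is offset by 1 so the stored owner value can never be 0,
// which the compare_exchange below uses to mean "unlocked".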
let id = id.checked_add(1).unwrap();
match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
// we transitioned from unlocked to locked
Ok(_) => {
debug_assert_eq!(*self.recursions.get(), 0);
Ok(())
}
// we currently own this lock, so let's update our count and return
// true.
Err(n) if n == id => {
*self.recursions.get() += 1;
Ok(())
}
// Someone else owns the lock, let our caller take care of it
Err(other) => Err(other),
}
}
pub unsafe fn unlock(&self) {
// If we didn't ever recursively lock the lock then we fully unlock the
// mutex and wake up a waiter, if any. Otherwise we decrement our
// recursive counter and let someone else take care of the zero.
match *self.recursions.get() {
0 => {
self.owner.swap(0, SeqCst);
// SAFETY: the caller must guarantee that `self.ptr()` is valid i32.
unsafe {
wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
} // wake up one waiter, if any
}
ref mut n => *n -= 1,
}
}
pub unsafe fn destroy(&self) {
// nothing to do...
}
#[inline]
fn ptr(&self) -> *mut i32 {
self.owner.as_mut_ptr() as *mut i32
}
}


@@ -536,15 +536,6 @@ pub struct CONDITION_VARIABLE {
pub struct SRWLOCK {
pub ptr: LPVOID,
}
#[repr(C)]
pub struct CRITICAL_SECTION {
CriticalSectionDebug: LPVOID,
LockCount: LONG,
RecursionCount: LONG,
OwningThread: HANDLE,
LockSemaphore: HANDLE,
SpinCount: ULONG_PTR,
}
#[repr(C)]
pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
@@ -875,11 +866,6 @@ if #[cfg(target_vendor = "uwp")] {
#[link(name = "kernel32")]
extern "system" {
pub fn GetCurrentProcessId() -> DWORD;
pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOL;
pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
pub fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL;


@@ -2,5 +2,5 @@ mod condvar;
mod mutex;
mod rwlock;
pub use condvar::{Condvar, MovableCondvar};
pub use mutex::{MovableMutex, Mutex, ReentrantMutex};
pub use mutex::{MovableMutex, Mutex};
pub use rwlock::{MovableRwLock, RwLock};


@@ -15,7 +15,6 @@
//! is that there are no guarantees of fairness.
use crate::cell::UnsafeCell;
use crate::mem::MaybeUninit;
use crate::sys::c;
pub struct Mutex {
@@ -60,37 +59,3 @@ impl Mutex {
// SRWLock does not need to be destroyed.
}
}
pub struct ReentrantMutex {
inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
}
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub const fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: MaybeUninit::uninit() }
}
pub unsafe fn init(&self) {
c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
}
pub unsafe fn lock(&self) {
c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
}
pub unsafe fn unlock(&self) {
c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
}
pub unsafe fn destroy(&self) {
c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
}
}


@@ -1,10 +1,12 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
use crate::cell::UnsafeCell;
use crate::marker::PhantomPinned;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::pin::Pin;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;
/// A re-entrant mutual exclusion
@@ -12,8 +14,36 @@ use crate::sys::locks as sys;
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by stdout().lock() and friends.
///
/// ## Implementation details
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
inner: sys::ReentrantMutex,
mutex: sys::Mutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
data: T,
_pinned: PhantomPinned,
}
@@ -53,7 +83,9 @@ impl<T> ReentrantMutex<T> {
/// lock/unlock methods safe.
pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
inner: sys::ReentrantMutex::uninitialized(),
mutex: sys::Mutex::new(),
owner: AtomicUsize::new(0),
lock_count: UnsafeCell::new(0),
data: t,
_pinned: PhantomPinned,
}
@@ -66,7 +98,7 @@ impl<T> ReentrantMutex<T> {
/// Unsafe to call more than once, and must be called after this will no
/// longer move in memory.
pub unsafe fn init(self: Pin<&mut Self>) {
self.get_unchecked_mut().inner.init()
self.get_unchecked_mut().mutex.init()
}
/// Acquires a mutex, blocking the current thread until it is able to do so.
@@ -82,7 +114,19 @@ impl<T> ReentrantMutex<T> {
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
unsafe { self.inner.lock() }
let this_thread = current_thread_unique_ptr();
// Safety: We only touch lock_count when we own the lock,
// and since self is pinned we can safely call the lock() on the mutex.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
self.mutex.lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
}
}
ReentrantMutexGuard { lock: self }
}
@@ -99,20 +143,35 @@ impl<T> ReentrantMutex<T> {
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
if unsafe { self.inner.try_lock() } {
Some(ReentrantMutexGuard { lock: self })
} else {
None
let this_thread = current_thread_unique_ptr();
// Safety: We only touch lock_count when we own the lock,
// and since self is pinned we can safely call the try_lock on the mutex.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
Some(ReentrantMutexGuard { lock: self })
} else if self.mutex.try_lock() {
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
Some(ReentrantMutexGuard { lock: self })
} else {
None
}
}
}
unsafe fn increment_lock_count(&self) {
*self.lock_count.get() = (*self.lock_count.get())
.checked_add(1)
.expect("lock count overflow in reentrant mutex");
}
}
impl<T> Drop for ReentrantMutex<T> {
fn drop(&mut self) {
// This is actually safe b/c we know that there is no further usage of
// this mutex (it's up to the user to arrange for a mutex to get
// dropped, that's not our job)
unsafe { self.inner.destroy() }
// Safety: We're the unique owner of this mutex and not going to use it afterwards.
unsafe { self.mutex.destroy() }
}
}
@@ -127,8 +186,22 @@ impl<T> Deref for ReentrantMutexGuard<'_, T> {
impl<T> Drop for ReentrantMutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
// Safety: We own the lock, and the lock is pinned.
unsafe {
self.lock.inner.unlock();
*self.lock.lock_count.get() -= 1;
if *self.lock.lock_count.get() == 0 {
self.lock.owner.store(0, Relaxed);
self.lock.mutex.unlock();
}
}
}
}
/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub fn current_thread_unique_ptr() -> usize {
// Use a non-drop type to make sure it's still available during thread destruction.
thread_local! { static X: u8 = const { 0 } }
X.with(|x| <*const _>::addr(x))
}