From 16d65d04322de4a00327dfe26b4af6bd3e4187c8 Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Tue, 6 Oct 2020 11:39:59 +0200
Subject: [PATCH] revise Hermit's mutex interface to support the behaviour of
 StaticMutex

rust-lang/rust#77147 simplifies things by splitting this Mutex type into
two types matching the two use cases: StaticMutex and MovableMutex. To
support the behavior of StaticMutex, we move part of the mutex
implementation into libstd.
---
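Note: the Spinlock added below is a ticket lock: "queue" hands out tickets
and "dequeue" announces whose turn it is, so waiters enter in strict FIFO
order. The following standalone sketch of that technique is illustrative
only and not part of the patch: TicketLock and with_lock are invented
names, and it uses the stable std::hint::spin_loop where the in-tree code
uses libstd's internal spin_loop_hint.

    use std::cell::UnsafeCell;
    use std::hint::spin_loop;
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct TicketLock<T> {
        queue: AtomicUsize,   // next ticket to hand out
        dequeue: AtomicUsize, // ticket currently allowed to enter
        data: UnsafeCell<T>,
    }

    // Safe to share across threads as long as the protected data is Send.
    unsafe impl<T: Send> Sync for TicketLock<T> {}

    impl<T> TicketLock<T> {
        const fn new(data: T) -> Self {
            // `dequeue` starts at 1 because the first fetch_add(1) + 1 yields 1.
            TicketLock {
                queue: AtomicUsize::new(0),
                dequeue: AtomicUsize::new(1),
                data: UnsafeCell::new(data),
            }
        }

        fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
            // Draw a ticket, then spin until it is called: strictly FIFO,
            // so the lock is completely fair and no waiter can starve.
            let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
            while self.dequeue.load(Ordering::SeqCst) != ticket {
                spin_loop();
            }
            let result = f(unsafe { &mut *self.data.get() });
            // Releasing is a single increment: the next ticket may enter.
            self.dequeue.fetch_add(1, Ordering::SeqCst);
            result
        }
    }

    fn main() {
        // A const constructor lets the lock live in a static, which is the
        // property the patch needs to back StaticMutex.
        static COUNTER: TicketLock<usize> = TicketLock::new(0);
        let handles: Vec<_> = (0..4)
            .map(|_| {
                std::thread::spawn(|| {
                    for _ in 0..1000 {
                        COUNTER.with_lock(|n| *n += 1);
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(COUNTER.with_lock(|n| *n), 4000);
    }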
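Note: PriorityQueue below keeps one FIFO of task ids per priority plus a
u64 bitmap in which bit i is set exactly when queues[i] is non-empty, so
pop() only has to find the first set bit. A hedged sketch of the same
bookkeeping (WaitQueue is an invented name, plain u32 ids stand in for
abi::Tid, and u64::trailing_zeros replaces the patch's linear bit scan;
both pick the lowest set index first):

    use std::collections::VecDeque;

    const NO_PRIORITIES: usize = 31; // mirrors abi::NO_PRIORITIES

    struct WaitQueue {
        queues: Vec<Option<VecDeque<u32>>>, // one FIFO of task ids per priority
        prio_bitmap: u64,                   // bit i set <=> queues[i] non-empty
    }

    impl WaitQueue {
        fn new() -> Self {
            WaitQueue { queues: (0..NO_PRIORITIES).map(|_| None).collect(), prio_bitmap: 0 }
        }

        fn push(&mut self, prio: usize, id: u32) {
            // Caller guarantees prio < NO_PRIORITIES.
            self.prio_bitmap |= 1 << prio;
            self.queues[prio].get_or_insert_with(VecDeque::new).push_back(id);
        }

        fn pop(&mut self) -> Option<u32> {
            // trailing_zeros finds the lowest set bit in one step; a result
            // of 64 (>= NO_PRIORITIES) means the bitmap is empty.
            let i = self.prio_bitmap.trailing_zeros() as usize;
            if i >= NO_PRIORITIES {
                return None;
            }
            let queue = self.queues[i].as_mut().unwrap();
            let id = queue.pop_front();
            if queue.is_empty() {
                self.prio_bitmap &= !(1 << i);
            }
            id
        }
    }

    fn main() {
        let mut wq = WaitQueue::new();
        wq.push(5, 42);
        wq.push(2, 7);
        assert_eq!(wq.pop(), Some(7)); // lowest index wins
        assert_eq!(wq.pop(), Some(42));
        assert_eq!(wq.pop(), None);
    }

The ordering inside Mutex::lock is also deliberate: the task enqueues itself
and calls abi::block_current_task() while still holding the spinlock guard,
and only then drops the guard before abi::yield_now(). A wakeup_task from
unlock() can therefore only arrive after the task is already marked blocked,
so no wakeup can be lost between dropping the guard and yielding.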
 library/std/src/sys/hermit/mutex.rs | 190 ++++++++++++++++++++++++++--
 1 file changed, 182 insertions(+), 8 deletions(-)

diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs
index 3d4813209cb..511a5100ac6 100644
--- a/library/std/src/sys/hermit/mutex.rs
+++ b/library/std/src/sys/hermit/mutex.rs
@@ -1,9 +1,161 @@
+use crate::cell::UnsafeCell;
+use crate::collections::VecDeque;
 use crate::ffi::c_void;
+use crate::ops::{Deref, DerefMut, Drop};
 use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint};
 use crate::sys::hermit::abi;
 
+/// This type provides a lock based on busy waiting to realize mutual exclusion
+///
+/// # Description
+///
+/// This structure behaves a lot like a common mutex. There are some differences:
+///
+/// - By using busy waiting, it can be used outside the runtime.
+/// - It is a so-called ticket lock (https://en.wikipedia.org/wiki/Ticket_lock)
+///   and completely fair.
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+struct Spinlock<T: ?Sized> {
+    queue: AtomicUsize,
+    dequeue: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
+unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
+
+/// A guard through which the protected data can be accessed
+///
+/// When the guard falls out of scope it will release the lock.
+struct SpinlockGuard<'a, T: ?Sized + 'a> {
+    dequeue: &'a AtomicUsize,
+    data: &'a mut T,
+}
+
+impl<T> Spinlock<T> {
+    pub const fn new(user_data: T) -> Spinlock<T> {
+        Spinlock {
+            queue: AtomicUsize::new(0),
+            dequeue: AtomicUsize::new(1),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    #[inline]
+    fn obtain_lock(&self) {
+        let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
+        while self.dequeue.load(Ordering::SeqCst) != ticket {
+            spin_loop_hint();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
+        self.obtain_lock();
+        SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Spinlock<T> {
+    fn default() -> Spinlock<T> {
+        Spinlock::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
+    /// The dropping of the SpinlockGuard will release the lock it was created from.
+    fn drop(&mut self) {
+        self.dequeue.fetch_add(1, Ordering::SeqCst);
+    }
+}
+
+/// Realize a priority queue for tasks
+struct PriorityQueue {
+    queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
+    prio_bitmap: u64,
+}
+
+impl PriorityQueue {
+    pub const fn new() -> PriorityQueue {
+        PriorityQueue {
+            queues: [
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+                None, None, None,
+            ],
+            prio_bitmap: 0,
+        }
+    }
+
+    /// Add a task handle by its priority to the queue
+    pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
+        let i: usize = prio.into().into();
+        self.prio_bitmap |= (1 << i) as u64;
+        if let Some(queue) = &mut self.queues[i] {
+            queue.push_back(id);
+        } else {
+            let mut queue = VecDeque::new();
+            queue.push_back(id);
+            self.queues[i] = Some(queue);
+        }
+    }
+
+    fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
+        if let Some(queue) = &mut self.queues[queue_index] {
+            let id = queue.pop_front();
+
+            if queue.is_empty() {
+                self.prio_bitmap &= !(1 << queue_index as u64);
+            }
+
+            id
+        } else {
+            None
+        }
+    }
+
+    /// Pop the task handle with the highest priority from the queue
+    pub fn pop(&mut self) -> Option<abi::Tid> {
+        for i in 0..abi::NO_PRIORITIES {
+            if self.prio_bitmap & (1 << i) != 0 {
+                return self.pop_from_queue(i);
+            }
+        }
+
+        None
+    }
+}
+
+struct MutexInner {
+    locked: bool,
+    blocked_task: PriorityQueue,
+}
+
+impl MutexInner {
+    pub const fn new() -> MutexInner {
+        MutexInner {
+            locked: false,
+            blocked_task: PriorityQueue::new(),
+        }
+    }
+}
+
 pub struct Mutex {
-    inner: *const c_void,
+    inner: Spinlock<MutexInner>,
 }
 
 unsafe impl Send for Mutex {}
@@ -11,33 +163,55 @@ unsafe impl Sync for Mutex {}
 
 impl Mutex {
     pub const fn new() -> Mutex {
-        Mutex { inner: ptr::null() }
+        Mutex {
+            inner: Spinlock::new(MutexInner::new()),
+        }
     }
 
     #[inline]
     pub unsafe fn init(&mut self) {
-        let _ = abi::sem_init(&mut self.inner as *mut *const c_void, 1);
+        self.inner = Spinlock::new(MutexInner::new());
     }
 
     #[inline]
     pub unsafe fn lock(&self) {
-        let _ = abi::sem_timedwait(self.inner, 0);
+        loop {
+            let mut guard = self.inner.lock();
+            if guard.locked == false {
+                guard.locked = true;
+                return;
+            } else {
+                let prio = abi::get_priority();
+                let id = abi::getpid();
+
+                guard.blocked_task.push(prio, id);
+                abi::block_current_task();
+                drop(guard);
+                abi::yield_now();
+            }
+        }
     }
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let _ = abi::sem_post(self.inner);
+        let mut guard = self.inner.lock();
+        guard.locked = false;
+        if let Some(tid) = guard.blocked_task.pop() {
+            abi::wakeup_task(tid);
+        }
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        let result = abi::sem_trywait(self.inner);
-        result == 0
+        let mut guard = self.inner.lock();
+        let acquired = !guard.locked;
+        // setting `locked` unconditionally is fine: it was already true otherwise
+        guard.locked = true;
+        acquired
     }
 
     #[inline]
     pub unsafe fn destroy(&self) {
-        let _ = abi::sem_destroy(self.inner);
     }
 }
 