diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs
index f1d264b38a0..aabc06b1986 100644
--- a/src/libstd/sync/mutex.rs
+++ b/src/libstd/sync/mutex.rs
@@ -13,7 +13,9 @@ use prelude::v1::*;
 
 use cell::UnsafeCell;
 use fmt;
 use marker;
+use mem;
 use ops::{Deref, DerefMut};
+use ptr;
 use sys_common::mutex as sys;
 use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
@@ -243,6 +245,64 @@ impl<T: ?Sized> Mutex<T> {
     pub fn is_poisoned(&self) -> bool {
         self.inner.poison.get()
     }
+
+    /// Consumes this mutex, returning the underlying data.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return an error instead.
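+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch; the feature gate below is needed as long as
+    /// this method is unstable:
+    ///
+    /// ```
+    /// #![feature(mutex_into_inner)]
+    ///
+    /// use std::sync::Mutex;
+    ///
+    /// let mutex = Mutex::new(0);
+    /// assert_eq!(mutex.into_inner().unwrap(), 0);
+    /// ```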
+    #[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
+    pub fn into_inner(self) -> LockResult<T> where T: Sized {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock the inner StaticMutex.
+        //
+        // To get the inner value, we'd like to call `data.into_inner()`,
+        // but because `Mutex` implements `Drop`, we can't move out of it, so
+        // we'll have to destructure it manually instead.
+        unsafe {
+            // Like `let Mutex { inner, data } = self`.
+            let (inner, data) = {
+                let Mutex { ref inner, ref data } = self;
+                (ptr::read(inner), ptr::read(data))
+            };
+            mem::forget(self);
+            inner.lock.destroy(); // Keep in sync with the `Drop` impl.
+
+            poison::map_result(inner.poison.borrow(), |_| data.into_inner())
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return an error instead.
+    #[unstable(feature = "mutex_get_mut", reason = "recently added", issue = "28968")]
+    pub fn get_mut(&mut self) -> LockResult<&mut T> {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner StaticMutex.
+        let data = unsafe { &mut *self.data.get() };
+        poison::map_result(self.inner.poison.borrow(), |_| data)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -251,6 +311,8 @@ impl<T: ?Sized> Drop for Mutex<T> {
         // This is actually safe b/c we know that there is no further usage of
         // this mutex (it's up to the user to arrange for a mutex to get
         // dropped, that's not our job)
+        //
+        // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
         unsafe { self.inner.lock.destroy() }
     }
 }
@@ -371,10 +433,14 @@ mod tests {
     use sync::mpsc::channel;
     use sync::{Arc, Mutex, StaticMutex, Condvar};
+    use sync::atomic::{AtomicUsize, Ordering};
     use thread;
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
     unsafe impl<T: Send> Send for Packet<T> {}
     unsafe impl<T> Sync for Packet<T> {}
@@ -435,6 +501,69 @@ mod tests {
         *m.try_lock().unwrap() = ();
     }
 
+    #[test]
+    fn test_into_inner() {
+        let m = Mutex::new(NonCopy(10));
+        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner().unwrap();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_into_inner_poison() {
+        let m = Arc::new(Mutex::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.lock().unwrap();
+            panic!("test panic in inner thread to poison mutex");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().into_inner() {
+            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
+        }
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let mut m = Mutex::new(NonCopy(10));
+        *m.get_mut().unwrap() = NonCopy(20);
+        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
+    }
+
+    #[test]
+    fn test_get_mut_poison() {
+        let m = Arc::new(Mutex::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.lock().unwrap();
+            panic!("test panic in inner thread to poison mutex");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().get_mut() {
+            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
+        }
+    }
+
     #[test]
     fn test_mutex_arc_condvar() {
         let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
diff --git a/src/libstd/sync/rwlock.rs b/src/libstd/sync/rwlock.rs
index 04ad4708246..9278481f2d6 100644
--- a/src/libstd/sync/rwlock.rs
+++ b/src/libstd/sync/rwlock.rs
@@ -13,7 +13,9 @@ use prelude::v1::*;
 
 use cell::UnsafeCell;
 use fmt;
 use marker;
+use mem;
 use ops::{Deref, DerefMut};
+use ptr;
 use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
 use sys_common::rwlock as sys;
@@ -260,11 +262,74 @@ impl<T: ?Sized> RwLock<T> {
     pub fn is_poisoned(&self) -> bool {
         self.inner.poison.get()
     }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    ///
+    /// # Failure
+    ///
+    /// This function will return an error if the RwLock is poisoned. An RwLock
+    /// is poisoned whenever a writer panics while holding an exclusive lock. An
+    /// error will only be returned if the lock would have otherwise been
+    /// acquired.
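+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch; the feature gate below is needed as long as
+    /// this method is unstable:
+    ///
+    /// ```
+    /// #![feature(rwlock_into_inner)]
+    ///
+    /// use std::sync::RwLock;
+    ///
+    /// let lock = RwLock::new(5);
+    /// assert_eq!(lock.into_inner().unwrap(), 5);
+    /// ```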
+    #[unstable(feature = "rwlock_into_inner", reason = "recently added", issue = "28968")]
+    pub fn into_inner(self) -> LockResult<T> where T: Sized {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock the inner StaticRwLock.
+        //
+        // To get the inner value, we'd like to call `data.into_inner()`,
+        // but because `RwLock` implements `Drop`, we can't move out of it, so
+        // we'll have to destructure it manually instead.
+        unsafe {
+            // Like `let RwLock { inner, data } = self`.
+            let (inner, data) = {
+                let RwLock { ref inner, ref data } = self;
+                (ptr::read(inner), ptr::read(data))
+            };
+            mem::forget(self);
+            inner.lock.destroy(); // Keep in sync with the `Drop` impl.
+
+            poison::map_result(inner.poison.borrow(), |_| data.into_inner())
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Failure
+    ///
+    /// This function will return an error if the RwLock is poisoned. An RwLock
+    /// is poisoned whenever a writer panics while holding an exclusive lock. An
+    /// error will only be returned if the lock would have otherwise been
+    /// acquired.
+    #[unstable(feature = "rwlock_get_mut", reason = "recently added", issue = "28968")]
+    pub fn get_mut(&mut self) -> LockResult<&mut T> {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner StaticRwLock.
+        let data = unsafe { &mut *self.data.get() };
+        poison::map_result(self.inner.poison.borrow(), |_| data)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> Drop for RwLock<T> {
     fn drop(&mut self) {
+        // IMPORTANT: This code must be kept in sync with `RwLock::into_inner`.
         unsafe { self.inner.lock.destroy() }
     }
 }
@@ -426,6 +491,10 @@ mod tests {
     use sync::mpsc::channel;
     use thread;
     use sync::{Arc, RwLock, StaticRwLock, TryLockError};
+    use sync::atomic::{AtomicUsize, Ordering};
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
 
     #[test]
     fn smoke() {
@@ -606,4 +675,67 @@ mod tests {
 
         drop(read_guard);
     }
+
+    #[test]
+    fn test_into_inner() {
+        let m = RwLock::new(NonCopy(10));
+        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = RwLock::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner().unwrap();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_into_inner_poison() {
+        let m = Arc::new(RwLock::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.write().unwrap();
+            panic!("test panic in inner thread to poison RwLock");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().into_inner() {
+            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
+        }
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let mut m = RwLock::new(NonCopy(10));
+        *m.get_mut().unwrap() = NonCopy(20);
+        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
+    }
+
+    #[test]
+    fn test_get_mut_poison() {
+        let m = Arc::new(RwLock::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.write().unwrap();
+            panic!("test panic in inner thread to poison RwLock");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().get_mut() {
+            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
+        }
+    }
 }