diff --git a/Cargo.toml b/Cargo.toml
index dfffc9c..d223733 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,6 +25,7 @@ __test = []
 crossbeam-utils = { version = "0.8.12", default-features = false }
 parking = { version = "2.0.0", optional = true }
 slab = { version = "0.4.7", default-features = false }
+spin = { version = "0.9.4", default-features = false, features = ["spin_mutex"] }
 
 [dev-dependencies]
 waker-fn = "1"
diff --git a/src/inner.rs b/src/inner.rs
index 1c5a758..8bf0cc5 100644
--- a/src/inner.rs
+++ b/src/inner.rs
@@ -3,8 +3,7 @@
 use crate::list::List;
 use crate::node::Node;
 use crate::queue::Queue;
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-use crate::sync::cell::UnsafeCell;
+use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::Task;
 
 use alloc::vec;
@@ -129,32 +128,22 @@ impl Drop for ListGuard<'_> {
 
 /// A simple mutex type that optimistically assumes that the lock is uncontended.
 struct Mutex<T> {
-    /// The inner value.
-    value: UnsafeCell<T>,
-
-    /// Whether the mutex is locked.
-    locked: AtomicBool,
+    inner: spin::Mutex<T>,
 }
 
 impl<T> Mutex<T> {
     /// Create a new mutex.
     pub(crate) fn new(value: T) -> Self {
         Self {
-            value: UnsafeCell::new(value),
-            locked: AtomicBool::new(false),
+            inner: spin::Mutex::new(value),
         }
     }
 
     /// Lock the mutex.
     pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
         // Try to lock the mutex.
-        if self
-            .locked
-            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            // We have successfully locked the mutex.
-            Some(MutexGuard { mutex: self })
+        if let Some(guard) = self.inner.try_lock() {
+            Some(MutexGuard { guard })
         } else {
             self.try_lock_slow()
         }
@@ -167,17 +156,13 @@ impl<T> Mutex<T> {
         let mut spins = 100u32;
 
         loop {
-            if self
-                .locked
-                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
-                .is_ok()
-            {
+            if let Some(guard) = self.inner.try_lock() {
                 // We have successfully locked the mutex.
-                return Some(MutexGuard { mutex: self });
+                return Some(MutexGuard { guard });
             }
 
-            // Use atomic loads instead of compare-exchange.
-            while self.locked.load(Ordering::Relaxed) {
+            // Use is_locked instead of try_lock, as it is only a load as opposed to a swap.
+            while self.inner.is_locked() {
                 // Return None once we've exhausted the number of spins.
                 spins = spins.checked_sub(1)?;
             }
@@ -186,28 +171,19 @@
 }
 
 struct MutexGuard<'a, T> {
-    mutex: &'a Mutex<T>,
-}
-
-impl<'a, T> Drop for MutexGuard<'a, T> {
-    fn drop(&mut self) {
-        self.mutex.locked.store(false, Ordering::Release);
-    }
+    guard: spin::MutexGuard<'a, T>,
 }
 
 impl<'a, T> ops::Deref for MutexGuard<'a, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
-        unsafe { &*self.mutex.value.get() }
+        &self.guard
     }
 }
 
 impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.mutex.value.get() }
+        &mut self.guard
    }
 }
 
-unsafe impl<T: Send> Send for Mutex<T> {}
-unsafe impl<T: Send> Sync for Mutex<T> {}
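
For context, the change boils down to the self-contained sketch below. It assumes `spin` 0.9's `Mutex::try_lock` and `Mutex::is_locked` API; the crate-internal `crate::sync` wiring, `pub(crate)` visibility, and the surrounding `List`/`Queue` types are omitted, and the `main` driver is purely illustrative.

```rust
use core::ops;

/// A simple mutex type that optimistically assumes that the lock is uncontended.
struct Mutex<T> {
    inner: spin::Mutex<T>,
}

impl<T> Mutex<T> {
    fn new(value: T) -> Self {
        Self { inner: spin::Mutex::new(value) }
    }

    /// Fast path: a single `try_lock` on the underlying spin mutex.
    fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
        if let Some(guard) = self.inner.try_lock() {
            Some(MutexGuard { guard })
        } else {
            self.try_lock_slow()
        }
    }

    /// Slow path: retry for a bounded spin budget, giving up with `None`.
    #[cold]
    fn try_lock_slow(&self) -> Option<MutexGuard<'_, T>> {
        let mut spins = 100u32;
        loop {
            if let Some(guard) = self.inner.try_lock() {
                return Some(MutexGuard { guard });
            }
            // Spin on `is_locked` (a plain atomic load) instead of hammering
            // the lock with repeated compare-and-swap attempts.
            while self.inner.is_locked() {
                spins = spins.checked_sub(1)?;
            }
        }
    }
}

/// Wrapper around the spin guard; unlocking happens in the guard's own Drop.
struct MutexGuard<'a, T> {
    guard: spin::MutexGuard<'a, T>,
}

impl<T> ops::Deref for MutexGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.guard
    }
}

impl<T> ops::DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.guard
    }
}

fn main() {
    let m = Mutex::new(0u32);
    if let Some(mut guard) = m.try_lock() {
        *guard += 1;
    }
    // The first guard has been dropped, so locking again succeeds.
    assert_eq!(m.try_lock().map(|g| *g), Some(1));
}
```

The slow path keeps the original behaviour: it retries `try_lock`, and between attempts waits on `is_locked`, returning `None` once the spin budget is exhausted. The hand-rolled `Drop`, the `UnsafeCell`, and the manual `Send`/`Sync` impls disappear because `spin::Mutex` and its guard already provide them.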