diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index 1e82a03f28630..c1a1b8c3d8cba 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -150,6 +150,7 @@
 #![feature(wrapping)]
 #![feature(zero_one)]
 #![cfg_attr(windows, feature(str_utf16))]
+#![cfg_attr(windows, feature(num_bits_bytes))]
 #![cfg_attr(test, feature(float_from_str_radix, range_inclusive, float_extras, hash_default))]
 #![cfg_attr(test, feature(test, rustc_private, float_consts))]
 #![cfg_attr(target_env = "msvc", feature(link_args))]
diff --git a/src/libstd/sys/windows/c.rs b/src/libstd/sys/windows/c.rs
index 06c14b39e124a..614946b571a6c 100644
--- a/src/libstd/sys/windows/c.rs
+++ b/src/libstd/sys/windows/c.rs
@@ -518,36 +518,293 @@ compat_fn! {
                      _dwBufferSize: DWORD) -> BOOL {
         SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
     }
+}
+
+// Functions for SRWLocks and condition variables. Fallbacks will be used for
+// all functions if any aren't available since Windows Vista has SRWLocks, but
+// doesn't have the TryAcquireSRWLock* functions.
+use sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
+use ptr;
+
+compat_group! {
+    SyncPrimitives, SYNC_PRIMITIVES, load_sync_primitives, kernel32:
+
+    // FIXME - Implement shared lock flag?
     pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
                                      SRWLock: PSRWLOCK,
                                      dwMilliseconds: DWORD,
                                      Flags: ULONG) -> BOOL {
-        panic!("condition variables not available")
+        {
+            let condvar = &*(ConditionVariable as *mut AtomicUsize);
+            // Increment the waiting thread counter.
+            condvar.fetch_add(1, Ordering::SeqCst);
+            let mut timeout = dwMilliseconds as libc::LARGE_INTEGER;
+            let timeout_ptr = if dwMilliseconds == libc::INFINITE {
+                ptr::null_mut()
+            } else {
+                &mut timeout as *mut _
+            };
+            ReleaseSRWLockExclusive(SRWLock);
+            let mut status = NtWaitForKeyedEvent(keyed_event_handle(),
+                                                 ConditionVariable as PVOID,
+                                                 0,
+                                                 timeout_ptr);
+            if status != STATUS_SUCCESS {
+                // If we weren't woken by another thread, try to decrement the counter.
+                if !update_atomic(condvar, |x| (if x > 0 { x - 1 } else { x }, x > 0)) {
+                    // If we can't decrement it, another thread is trying to wake us
+                    // up right now. Wait so that we can allow it to do so.
+                    status = NtWaitForKeyedEvent(keyed_event_handle(),
+                                                 ConditionVariable as PVOID,
+                                                 0,
+                                                 ptr::null_mut());
+                }
+            }
+            AcquireSRWLockExclusive(SRWLock);
+            SetLastError(RtlNtStatusToDosError(status) as DWORD);
+            if status == STATUS_SUCCESS { libc::TRUE } else { libc::FALSE }
+        }
     }
     pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE)
                                  -> () {
-        panic!("condition variables not available")
+        {
+            let condvar = &*(ConditionVariable as *mut AtomicUsize);
+            // Try to decrement the thread counter.
+            if update_atomic(condvar, |x| (if x > 0 { x - 1 } else { x }, x > 0)) {
+                // If successful, wake up a thread.
+                NtReleaseKeyedEvent(keyed_event_handle(),
+                                    ConditionVariable as PVOID,
+                                    0,
+                                    ptr::null_mut());
+            }
+        }
     }
     pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE)
                                     -> () {
-        panic!("condition variables not available")
+        {
+            let condvar = &*(ConditionVariable as *mut AtomicUsize);
+            // Take the thread counter value, swap it with zero, and wake up
+            // that many threads.
+            for _ in 0..condvar.swap(0, Ordering::SeqCst) {
+                NtReleaseKeyedEvent(keyed_event_handle(),
+                                    ConditionVariable as PVOID,
+                                    0,
+                                    ptr::null_mut());
+            }
+        }
     }
     pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> () {
-        panic!("rwlocks not available")
+        {
+            // Increment the exclusive counter and wait if any other thread is
+            // holding this lock in any way.
+            let wait = update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+                let f = f | ((ex == 0) & (sh == 0));
+                let ex = ex + 1;
+                (f, ex, sh, !(f & (ex == 1)))
+            });
+            if wait {
+                NtWaitForKeyedEvent(keyed_event_handle(), SRWLock as PVOID, 0, ptr::null_mut());
+            }
+        }
     }
     pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK) -> () {
-        panic!("rwlocks not available")
+        {
+            // Increment the shared counter and wait if the lock is currently being
+            // held exclusively.
+            let wait = update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+                let sh = sh + 1;
+                (f, ex, sh, f)
+            });
+            if wait {
+                NtWaitForKeyedEvent(keyed_event_handle(),
+                                    ((SRWLock as usize) + 2) as PVOID,
+                                    0,
+                                    ptr::null_mut());
+            }
+        }
     }
     pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK) -> () {
-        panic!("rwlocks not available")
+        {
+            // If other threads are trying to hold this lock exclusively, wake one up.
+            // Otherwise, if threads are trying to share this lock, wake them all up.
+            let release = update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+                let ex = ex - 1;
+                let rel = if ex > 0 {
+                    Release::Exclusive
+                } else if sh > 0 {
+                    Release::Shared(sh)
+                } else {
+                    Release::None
+                };
+                (rel == Release::Exclusive, ex, sh, rel)
+            });
+            release_srwlock(SRWLock, release);
+        }
     }
     pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK) -> () {
-        panic!("rwlocks not available")
+        {
+            // If we're the last thread to share this lock and other threads are trying to
+            // hold it exclusively, wake one up.
+            let release = update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+                let sh = sh - 1;
+                let f = (ex > 0) & (sh == 0);
+                (f, ex, sh, if f { Release::Exclusive } else { Release::None })
+            });
+            release_srwlock(SRWLock, release);
+        }
     }
     pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN {
-        panic!("rwlocks not available")
+        update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+            if (f, ex, sh) == (false, 0, 0) {
+                (true, 1, 0, libc::TRUE as BOOLEAN)
+            } else {
+                (f, ex, sh, libc::FALSE as BOOLEAN)
+            }
+        })
     }
     pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN {
-        panic!("rwlocks not available")
+        update_srwlock(&*(SRWLock as *mut AtomicUsize), |f, ex, sh| {
+            if !f {
+                (false, ex, sh + 1, libc::TRUE as BOOLEAN)
+            } else {
+                (f, ex, sh, libc::FALSE as BOOLEAN)
+            }
+        })
+    }
+}
+
+// This implementation splits the SRWLock into 3 parts: a shared thread count, an exclusive thread
+// count, and an exclusive lock flag. The shared thread count is stored in the lower half, and the
+// exclusive thread count is stored in the upper half, except for the MSB which is used for the
+// exclusive lock flag.
+const EXCLUSIVE_FLAG: usize = 1 << (::usize::BITS - 1);
+const EXCLUSIVE_MASK: usize = !(SHARED_MASK | EXCLUSIVE_FLAG);
+const EXCLUSIVE_SHIFT: usize = ::usize::BITS / 2;
+const SHARED_MASK: usize = (1 << EXCLUSIVE_SHIFT) - 1;
+
+fn decompose_srwlock(x: usize) -> (bool, usize, usize) {
+    ((x & EXCLUSIVE_FLAG) != 0, (x & EXCLUSIVE_MASK) >> EXCLUSIVE_SHIFT, x & SHARED_MASK)
+}
+
+fn compose_srwlock(flag: bool, exclusive: usize, shared: usize) -> usize {
+    (if flag { EXCLUSIVE_FLAG } else { 0 }) | (exclusive << EXCLUSIVE_SHIFT) | shared
+}
+
+use ops::FnMut;
+fn update_srwlock<T, F: FnMut(bool, usize, usize) -> (bool, usize, usize, T)>
+    (atom: &AtomicUsize, mut func: F) -> T {
+    update_atomic(atom, |x| {
+        let (f, ex, sh) = decompose_srwlock(x);
+        let (f, ex, sh, ret) = func(f, ex, sh);
+        (compose_srwlock(f, ex, sh), ret)
+    })
+}
+
+fn update_atomic<T, F: FnMut(usize) -> (usize, T)>(atom: &AtomicUsize, mut func: F) -> T {
+    let mut old = atom.load(Ordering::SeqCst);
+    loop {
+        let (new, ret) = func(old);
+        let cmp = atom.compare_and_swap(old, new, Ordering::SeqCst);
+        if cmp == old {
+            return ret;
+        } else {
+            old = cmp;
+        }
+    }
+}
+
+#[derive(PartialEq, Copy, Clone)]
+enum Release {
+    None,
+    Exclusive,
+    Shared(usize)
+}
+
+fn release_srwlock(srwlock: PSRWLOCK, rel: Release) {
+    let exclusive = srwlock as PVOID;
+    let shared = ((exclusive as usize) + 2) as PVOID;
+    let handle = keyed_event_handle();
+    match rel {
+        Release::None => {},
+        Release::Exclusive => {
+            unsafe { NtReleaseKeyedEvent(handle, exclusive, 0, ptr::null_mut()); }
+        },
+        Release::Shared(s) => {
+            for _ in 0..s {
+                unsafe { NtReleaseKeyedEvent(handle, shared, 0, ptr::null_mut()); }
+            }
+        }
+    }
+}
+
+fn keyed_event_handle() -> HANDLE {
+    static KE_HANDLE: AtomicPtr<()> = AtomicPtr::new(libc::INVALID_HANDLE_VALUE as *mut ());
+
+    fn load() -> HANDLE {
+        static LOCK: AtomicBool = AtomicBool::new(false);
+        while LOCK.fetch_or(true, Ordering::SeqCst) {
+            // busywait...
+        }
+        let mut h: HANDLE = KE_HANDLE.load(Ordering::SeqCst) as HANDLE;
+        if h == libc::INVALID_HANDLE_VALUE {
+            let status = unsafe {
+                NtCreateKeyedEvent((&mut h) as PHANDLE, !0, ptr::null_mut(), 0)
+            };
+            if status != STATUS_SUCCESS {
+                LOCK.store(false, Ordering::SeqCst);
+                panic!("error creating keyed event handle");
+            }
+            KE_HANDLE.store(h as *mut (), Ordering::SeqCst);
+        }
+        LOCK.store(false, Ordering::SeqCst);
+        h
+    }
+
+    let handle = KE_HANDLE.load(Ordering::SeqCst) as HANDLE;
+    if handle == libc::INVALID_HANDLE_VALUE {
+        load()
+    } else {
+        handle
+    }
+}
+
+// Undocumented functions for keyed events used by SRWLock and condition
+// variable fallbacks. Don't need fallbacks for these, but put them here to
+// avoid directly linking to them in (the unlikely) case these functions are
+// removed in later versions of Windows.
+pub type PHANDLE = libc::LPHANDLE;
+pub type ACCESS_MASK = ULONG;
+pub type NTSTATUS = LONG;
+pub type PVOID = LPVOID;
+pub type PLARGE_INTEGER = *mut libc::LARGE_INTEGER;
+
+pub const STATUS_SUCCESS: NTSTATUS = 0x00000000;
+pub const STATUS_NOT_IMPLEMENTED: NTSTATUS = 0xC0000002u32 as NTSTATUS;
+
+compat_fn! {
+    ntdll:
+
+    // FIXME - ObjectAttributes should be POBJECT_ATTRIBUTES
+    pub fn NtCreateKeyedEvent(KeyedEventHandle: PHANDLE,
+                              DesiredAccess: ACCESS_MASK,
+                              ObjectAttributes: PVOID,
+                              Flags: ULONG) -> NTSTATUS {
+        STATUS_NOT_IMPLEMENTED
+    }
+
+    pub fn NtReleaseKeyedEvent(EventHandle: HANDLE,
+                               Key: PVOID,
+                               Alertable: BOOLEAN,
+                               Timeout: PLARGE_INTEGER) -> NTSTATUS {
+        STATUS_NOT_IMPLEMENTED
+    }
+
+    pub fn NtWaitForKeyedEvent(EventHandle: HANDLE,
+                               Key: PVOID,
+                               Alertable: BOOLEAN,
+                               Timeout: PLARGE_INTEGER) -> NTSTATUS {
+        STATUS_NOT_IMPLEMENTED
+    }
+
+    pub fn RtlNtStatusToDosError(Status: NTSTATUS) -> ULONG {
+        ERROR_CALL_NOT_IMPLEMENTED as ULONG
     }
 }
diff --git a/src/libstd/sys/windows/compat.rs b/src/libstd/sys/windows/compat.rs
index 3a03b91f24ed3..98aad27c2bf62 100644
--- a/src/libstd/sys/windows/compat.rs
+++ b/src/libstd/sys/windows/compat.rs
@@ -25,14 +25,14 @@ use prelude::v1::*;
 use ffi::CString;
 use libc::{LPVOID, LPCWSTR, HMODULE, LPCSTR};
-use sync::atomic::{AtomicUsize, Ordering};
+use sync::atomic::{AtomicPtr, Ordering};
 
 extern "system" {
     fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
     fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
 }
 
-pub fn lookup(module: &str, symbol: &str) -> Option<usize> {
+pub fn lookup(module: &str, symbol: &str) -> Option<*mut ()> {
     let mut module: Vec<u16> = module.utf16_units().collect();
     module.push(0);
     let symbol = CString::new(symbol).unwrap();
@@ -40,15 +40,15 @@ pub fn lookup(module: &str, symbol: &str) -> Option<usize> {
         let handle = GetModuleHandleW(module.as_ptr());
         match GetProcAddress(handle, symbol.as_ptr()) as usize {
             0 => None,
-            n => Some(n),
+            n => Some(n as *mut ()),
         }
     }
 }
 
-pub fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str,
-                  fallback: usize) -> usize {
+pub fn store_func(ptr: &AtomicPtr<()>, module: &str, symbol: &str,
+                  fallback: *mut ()) -> *mut () {
     let value = lookup(module, symbol).unwrap_or(fallback);
-    ptr.store(value, Ordering::SeqCst);
+    ptr.store(value, Ordering::Relaxed);
     value
 }
 
@@ -59,30 +59,109 @@ macro_rules! compat_fn {
             $($body:expr);*
         }
     )*) => ($(
-        #[allow(unused_variables)]
+        #[inline]
         pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
-            use sync::atomic::{AtomicUsize, Ordering};
+            use sync::atomic::{AtomicPtr, Ordering};
            use mem;
 
             type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
 
-            static PTR: AtomicUsize = AtomicUsize::new(0);
+            static PTR: AtomicPtr<()> = AtomicPtr::new(load as *mut ());
 
-            fn load() -> usize {
-                ::sys::compat::store_func(&PTR,
-                                          stringify!($module),
-                                          stringify!($symbol),
-                                          fallback as usize)
+            unsafe extern "system" fn load($($argname: $argtype),*)
+                                           -> $rettype {
+                let ptr = ::sys::compat::store_func(&PTR,
+                                                    stringify!($module),
+                                                    stringify!($symbol),
+                                                    fallback as *mut ());
+                mem::transmute::<*mut (), F>(ptr)($($argname),*)
             }
+
+            #[allow(unused_variables)]
             unsafe extern "system" fn fallback($($argname: $argtype),*)
                                                -> $rettype {
                 $($body);*
             }
 
-            let addr = match PTR.load(Ordering::SeqCst) {
-                0 => load(),
-                n => n,
-            };
-            mem::transmute::<usize, F>(addr)($($argname),*)
+            let ptr = PTR.load(Ordering::Relaxed);
+            mem::transmute::<*mut (), F>(ptr)($($argname),*)
         }
     )*)
 }
+
+macro_rules! compat_group {
+    ($gtype:ident, $gstatic:ident, $gload:ident, $module:ident: $(
+        pub fn $symbol:ident($($argname:ident: $argtype:ty),*)
+                             -> $rettype:ty {
+            $($body:expr);*
+        }
+    )*) => (
+        struct $gtype {
+            $($symbol: ::sync::atomic::AtomicPtr<()>),*
+        }
+        static $gstatic: $gtype = $gtype {$(
+            $symbol: ::sync::atomic::AtomicPtr::new({
+                type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
+                unsafe extern "system" fn $symbol($($argname: $argtype),*)
+                                                  -> $rettype {
+                    use self::$symbol;
+                    $gload();
+                    $symbol($($argname),*)
+                }
+                $symbol as *mut ()
+            })
+        ),*};
+
+        fn $gload() {
+            use option::Option::{None, Some};
+            use sync::atomic::Ordering;
+            use ptr;
+            $(
+                #[allow(unused_variables)]
+                unsafe extern "system" fn $symbol($($argname: $argtype),*)
+                                                  -> $rettype {
+                    $($body);*
+                }
+            )*
+
+            struct FuncPtrs {
+                $($symbol: *mut ()),*
+            }
+
+            const FALLBACKS: FuncPtrs = FuncPtrs {
+                $($symbol: $symbol as *mut ()),*
+            };
+
+            fn store_funcs(funcs: &FuncPtrs) {
+                $($gstatic.$symbol.store(funcs.$symbol, Ordering::Relaxed);)*
+            }
+
+            let mut funcs: FuncPtrs = FuncPtrs {
+                $($symbol: ptr::null_mut()),*
+            };
+
+            $(
+                let ptr = ::sys::compat::lookup(stringify!($module), stringify!($symbol));
+                match ptr {
+                    Some(ptr) => { funcs.$symbol = ptr; },
+                    None => {
+                        store_funcs(&FALLBACKS);
+                        return;
+                    }
+                }
+            )*
+
+            store_funcs(&funcs);
+        }
+
+        $(
+            #[inline]
+            pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
+                use sync::atomic::Ordering;
+                use mem;
+                type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
+                let ptr = $gstatic.$symbol.load(Ordering::Relaxed);
+                mem::transmute::<*mut (), F>(ptr)($($argname),*)
+            }
+        )*
+    )
+}
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index 277c3d14c0ec5..873f7a3163322 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -23,136 +23,45 @@
 //!
 //! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
 //!    is that there are no guarantees of fairness.
-//!
-//! The downside of this approach, however, is that SRWLock is not available on
-//! Windows XP, so we continue to have a fallback implementation where
-//! CriticalSection is used and we keep track of who's holding the mutex to
-//! detect recursive locks.
 
 use prelude::v1::*;
 
 use cell::UnsafeCell;
 use mem;
-use sync::atomic::{AtomicUsize, Ordering};
 use sys::c;
-use sys::compat;
 
-pub struct Mutex {
-    lock: AtomicUsize,
-    held: UnsafeCell<bool>,
-}
+pub struct Mutex { inner: UnsafeCell<c::SRWLOCK> }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
-#[derive(Clone, Copy)]
-enum Kind {
-    SRWLock = 1,
-    CriticalSection = 2,
-}
-
 #[inline]
 pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
-    debug_assert!(mem::size_of::<Kind>() <= mem::size_of_val(&m.lock));
-    &m.lock as *const _ as *mut _
+    m.inner.get()
 }
 
 impl Mutex {
+    #[inline]
     pub const fn new() -> Mutex {
-        Mutex {
-            lock: AtomicUsize::new(0),
-            held: UnsafeCell::new(false),
-        }
+        Mutex { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
     }
+    #[inline]
     pub unsafe fn lock(&self) {
-        match kind() {
-            Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => {
-                let re = self.remutex();
-                (*re).lock();
-                if !self.flag_locked() {
-                    (*re).unlock();
-                    panic!("cannot recursively lock a mutex");
-                }
-            }
-        }
+        c::AcquireSRWLockExclusive(self.inner.get())
     }
+    #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        match kind() {
-            Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
-            Kind::CriticalSection => {
-                let re = self.remutex();
-                if !(*re).try_lock() {
-                    false
-                } else if self.flag_locked() {
-                    true
-                } else {
-                    (*re).unlock();
-                    false
-                }
-            }
-        }
+        c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
     }
+    #[inline]
     pub unsafe fn unlock(&self) {
-        *self.held.get() = false;
-        match kind() {
-            Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => (*self.remutex()).unlock(),
-        }
-    }
-    pub unsafe fn destroy(&self) {
-        match kind() {
-            Kind::SRWLock => {}
-            Kind::CriticalSection => {
-                match self.lock.load(Ordering::SeqCst) {
-                    0 => {}
-                    n => { Box::from_raw(n as *mut ReentrantMutex).destroy(); }
-                }
-            }
-        }
+        c::ReleaseSRWLockExclusive(self.inner.get())
     }
 
-    unsafe fn remutex(&self) -> *mut ReentrantMutex {
-        match self.lock.load(Ordering::SeqCst) {
-            0 => {}
-            n => return n as *mut _,
-        }
-        let mut re = Box::new(ReentrantMutex::uninitialized());
-        re.init();
-        let re = Box::into_raw(re);
-        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
-            0 => re,
-            n => { Box::from_raw(re).destroy(); n as *mut _ }
-        }
-    }
-
-    unsafe fn flag_locked(&self) -> bool {
-        if *self.held.get() {
-            false
-        } else {
-            *self.held.get() = true;
-            true
-        }
-
-    }
-}
-
-fn kind() -> Kind {
-    static KIND: AtomicUsize = AtomicUsize::new(0);
-
-    let val = KIND.load(Ordering::SeqCst);
-    if val == Kind::SRWLock as usize {
-        return Kind::SRWLock
-    } else if val == Kind::CriticalSection as usize {
-        return Kind::CriticalSection
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // ...
     }
-
-    let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
-        None => Kind::CriticalSection,
-        Some(..) => Kind::SRWLock,
-    };
-    KIND.store(ret as usize, Ordering::SeqCst);
-    return ret;
 }
 
 pub struct ReentrantMutex { inner: UnsafeCell<c::CRITICAL_SECTION> }
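
A minimal, standalone sketch (not part of the patch) of the SRWLock word layout described in the "splits the SRWLock into 3 parts" comment above, together with the compare-and-swap retry loop that `update_atomic`/`update_srwlock` implement. The names `decompose`, `compose`, and `update` are illustrative, and the sketch uses stable `std` APIs rather than the internal libstd paths and the older `compare_and_swap` used in the diff.

use std::sync::atomic::{AtomicUsize, Ordering};

// Word layout: MSB = exclusive flag, upper half (minus the MSB) = exclusive
// thread count, lower half = shared thread count.
const BITS: usize = usize::BITS as usize;
const EXCLUSIVE_FLAG: usize = 1 << (BITS - 1);
const EXCLUSIVE_SHIFT: usize = BITS / 2;
const SHARED_MASK: usize = (1 << EXCLUSIVE_SHIFT) - 1;
const EXCLUSIVE_MASK: usize = !(SHARED_MASK | EXCLUSIVE_FLAG);

fn decompose(x: usize) -> (bool, usize, usize) {
    (x & EXCLUSIVE_FLAG != 0, (x & EXCLUSIVE_MASK) >> EXCLUSIVE_SHIFT, x & SHARED_MASK)
}

fn compose(flag: bool, exclusive: usize, shared: usize) -> usize {
    (if flag { EXCLUSIVE_FLAG } else { 0 }) | (exclusive << EXCLUSIVE_SHIFT) | shared
}

// Apply `func` to the decomposed state and retry until the CAS succeeds,
// returning whatever extra value `func` produced (the patch's `update_atomic`
// and `update_srwlock`, folded into one helper).
fn update<T>(atom: &AtomicUsize,
             mut func: impl FnMut(bool, usize, usize) -> (bool, usize, usize, T)) -> T {
    let mut old = atom.load(Ordering::SeqCst);
    loop {
        let (f, ex, sh) = decompose(old);
        let (f, ex, sh, ret) = func(f, ex, sh);
        match atom.compare_exchange(old, compose(f, ex, sh),
                                    Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => return ret,
            Err(cur) => old = cur,
        }
    }
}

fn main() {
    let lock = AtomicUsize::new(0);
    // The TryAcquireSRWLockShared rule: succeed only while no exclusive owner.
    let ok = update(&lock, |f, ex, sh| {
        if !f { (false, ex, sh + 1, true) } else { (f, ex, sh, false) }
    });
    assert!(ok);
    assert_eq!(decompose(lock.load(Ordering::SeqCst)), (false, 0, 1));
}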
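
Separately, a self-contained sketch (again not from the patch) of the lazy-binding scheme the reworked `compat_fn!` macro relies on: the static `AtomicPtr` starts out pointing at a `load` shim, the first call resolves the real symbol or a fallback, stores it, and forwards the arguments, so later calls are a plain indirect call with no branch. `lookup`, `real`, `fallback`, and `call` are stand-ins invented for this sketch; the real macro resolves symbols through GetProcAddress.

use std::mem;
use std::sync::atomic::{AtomicPtr, Ordering};

type F = fn(i32) -> i32;

// Starts out pointing at the loader shim, mirroring
// `AtomicPtr::new(load as *mut ())` in the macro.
static PTR: AtomicPtr<()> = AtomicPtr::new(load as *mut ());

// Stand-in for the dynamic symbol lookup; return None to exercise `fallback`.
fn lookup() -> Option<*mut ()> {
    Some(real as *mut ())
}

fn real(x: i32) -> i32 { x + 1 }
fn fallback(_x: i32) -> i32 { -1 }

fn load(x: i32) -> i32 {
    // Resolve once, cache the pointer, then forward the original arguments.
    let ptr = lookup().unwrap_or(fallback as *mut ());
    PTR.store(ptr, Ordering::Relaxed);
    (unsafe { mem::transmute::<*mut (), F>(ptr) })(x)
}

fn call(x: i32) -> i32 {
    // What every generated wrapper does after expansion: load and jump.
    let ptr = PTR.load(Ordering::Relaxed);
    (unsafe { mem::transmute::<*mut (), F>(ptr) })(x)
}

fn main() {
    assert_eq!(call(1), 2); // first call goes through `load`
    assert_eq!(call(1), 2); // later calls hit the cached pointer directly
}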