diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 3b79e0c4f8267..606ba488b6fb7 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -473,16 +473,21 @@ pub mod f64; pub mod thread; pub mod ascii; pub mod collections; +#[cfg(not(target_os = "cloudabi"))] pub mod env; pub mod error; pub mod ffi; +#[cfg(not(target_os = "cloudabi"))] pub mod fs; pub mod io; +#[cfg(not(target_os = "cloudabi"))] pub mod net; pub mod num; pub mod os; pub mod panic; +#[cfg(not(target_os = "cloudabi"))] pub mod path; +#[cfg(not(target_os = "cloudabi"))] pub mod process; pub mod sync; pub mod time; diff --git a/src/libstd/sys/cloudabi/abi/mod.rs b/src/libstd/sys/cloudabi/abi/mod.rs new file mode 100644 index 0000000000000..81a4d29342301 --- /dev/null +++ b/src/libstd/sys/cloudabi/abi/mod.rs @@ -0,0 +1,13 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[allow(warnings)] +mod cloudabi; +pub use self::cloudabi::*; diff --git a/src/libstd/sys/cloudabi/args.rs b/src/libstd/sys/cloudabi/args.rs new file mode 100644 index 0000000000000..f076ab5189701 --- /dev/null +++ b/src/libstd/sys/cloudabi/args.rs @@ -0,0 +1,15 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[allow(dead_code)] +pub fn init(_: isize, _: *const *const u8) {} + +#[allow(dead_code)] +pub fn cleanup() {} diff --git a/src/libstd/sys/cloudabi/backtrace.rs b/src/libstd/sys/cloudabi/backtrace.rs new file mode 100644 index 0000000000000..33d931792375d --- /dev/null +++ b/src/libstd/sys/cloudabi/backtrace.rs @@ -0,0 +1,121 @@ +// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use error::Error; +use ffi::CStr; +use intrinsics; +use io; +use libc; +use sys_common::backtrace::Frame; +use unwind as uw; + +pub struct BacktraceContext; + +struct Context<'a> { + idx: usize, + frames: &'a mut [Frame], +} + +#[derive(Debug)] +struct UnwindError(uw::_Unwind_Reason_Code); + +impl Error for UnwindError { + fn description(&self) -> &'static str { + "unexpected return value while unwinding" + } +} + +impl ::fmt::Display for UnwindError { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + write!(f, "{}: {:?}", self.description(), self.0) + } +} + +#[inline(never)] // if we know this is a function call, we can skip it when + // tracing +pub fn unwind_backtrace(frames: &mut [Frame]) -> io::Result<(usize, BacktraceContext)> { + let mut cx = Context { idx: 0, frames }; + let result_unwind = + unsafe { uw::_Unwind_Backtrace(trace_fn, &mut cx as *mut Context as *mut libc::c_void) }; + // See libunwind:src/unwind/Backtrace.c for the return values. + // No, there is no doc. 
+    match result_unwind {
+        // These return codes seem to be benign and need to be ignored for backtraces
+        // to show up properly on all tested platforms.
+        uw::_URC_END_OF_STACK | uw::_URC_FATAL_PHASE1_ERROR | uw::_URC_FAILURE => {
+            Ok((cx.idx, BacktraceContext))
+        }
+        _ => Err(io::Error::new(
+            io::ErrorKind::Other,
+            UnwindError(result_unwind),
+        )),
+    }
+}
+
+extern "C" fn trace_fn(
+    ctx: *mut uw::_Unwind_Context,
+    arg: *mut libc::c_void,
+) -> uw::_Unwind_Reason_Code {
+    let cx = unsafe { &mut *(arg as *mut Context) };
+    let mut ip_before_insn = 0;
+    let mut ip = unsafe { uw::_Unwind_GetIPInfo(ctx, &mut ip_before_insn) as *mut libc::c_void };
+    if !ip.is_null() && ip_before_insn == 0 {
+        // this is a non-signaling frame, so `ip` refers to the address
+        // after the calling instruction. account for that.
+        ip = (ip as usize - 1) as *mut _;
+    }
+
+    let symaddr = unsafe { uw::_Unwind_FindEnclosingFunction(ip) };
+    if cx.idx < cx.frames.len() {
+        cx.frames[cx.idx] = Frame {
+            symbol_addr: symaddr as *mut u8,
+            exact_position: ip as *mut u8,
+        };
+        cx.idx += 1;
+    }
+
+    uw::_URC_NO_REASON
+}
+
+pub fn foreach_symbol_fileline<F>(_: Frame, _: F, _: &BacktraceContext) -> io::Result<bool>
+where
+    F: FnMut(&[u8], u32) -> io::Result<()>,
+{
+    // No way to obtain this information on CloudABI.
+    Ok(false)
+}
+
+pub fn resolve_symname<F>(frame: Frame, callback: F, _: &BacktraceContext) -> io::Result<()>
+where
+    F: FnOnce(Option<&str>) -> io::Result<()>,
+{
+    unsafe {
+        let mut info: Dl_info = intrinsics::init();
+        let symname =
+            if dladdr(frame.exact_position as *mut _, &mut info) == 0 || info.dli_sname.is_null() {
+                None
+            } else {
+                CStr::from_ptr(info.dli_sname).to_str().ok()
+            };
+        callback(symname)
+    }
+}
+
+#[repr(C)]
+struct Dl_info {
+    dli_fname: *const libc::c_char,
+    dli_fbase: *mut libc::c_void,
+    dli_sname: *const libc::c_char,
+    dli_saddr: *mut libc::c_void,
+}
+
+extern "C" {
+    fn dladdr(addr: *const libc::c_void, info: *mut Dl_info) -> libc::c_int;
+}
diff --git a/src/libstd/sys/cloudabi/condvar.rs b/src/libstd/sys/cloudabi/condvar.rs
new file mode 100644
index 0000000000000..c05c837ade274
--- /dev/null
+++ b/src/libstd/sys/cloudabi/condvar.rs
@@ -0,0 +1,169 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
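Editor's note: the dladdr-based lookup used by resolve_symname in backtrace.rs above can be exercised outside libstd as well. A minimal standalone sketch, assuming the external `libc` crate and a Unix-like host; the `DlInfo` struct and `symbol_name_at` helper are illustrative names, and whether a name is actually found depends on how the binary was linked:

    extern crate libc;

    use std::ffi::CStr;

    // Mirror of the C `Dl_info` structure that dladdr(3) fills in.
    #[repr(C)]
    struct DlInfo {
        dli_fname: *const libc::c_char,
        dli_fbase: *mut libc::c_void,
        dli_sname: *const libc::c_char,
        dli_saddr: *mut libc::c_void,
    }

    extern "C" {
        fn dladdr(addr: *const libc::c_void, info: *mut DlInfo) -> libc::c_int;
    }

    // Return the name of the symbol enclosing `addr`, if the dynamic linker knows it.
    fn symbol_name_at(addr: *const libc::c_void) -> Option<String> {
        unsafe {
            let mut info: DlInfo = std::mem::zeroed();
            if dladdr(addr, &mut info) == 0 || info.dli_sname.is_null() {
                None
            } else {
                CStr::from_ptr(info.dli_sname).to_str().ok().map(|s| s.to_string())
            }
        }
    }

    fn main() {
        // Ask for the symbol that contains this function's own address.
        let addr = symbol_name_at as usize as *const libc::c_void;
        println!("{:?}", symbol_name_at(addr));
    }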
+ +use cell::UnsafeCell; +use mem; +use sync::atomic::{AtomicU32, Ordering}; +use sys::cloudabi::abi; +use sys::mutex::{self, Mutex}; +use sys::time::dur2intervals; +use time::Duration; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +pub struct Condvar { + condvar: UnsafeCell, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { + condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)), + } + } + + pub unsafe fn init(&mut self) {} + + pub unsafe fn notify_one(&self) { + let condvar = self.condvar.get(); + if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { + let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1); + assert_eq!( + ret, + abi::errno::SUCCESS, + "Failed to signal on condition variable" + ); + } + } + + pub unsafe fn notify_all(&self) { + let condvar = self.condvar.get(); + if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { + let ret = abi::condvar_signal( + condvar as *mut abi::condvar, + abi::scope::PRIVATE, + abi::nthreads::max_value(), + ); + assert_eq!( + ret, + abi::errno::SUCCESS, + "Failed to broadcast on condition variable" + ); + } + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + let mutex = mutex::raw(mutex); + assert_eq!( + (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This lock is not write-locked by this thread" + ); + + // Call into the kernel to wait on the condition variable. + let condvar = self.condvar.get(); + let subscription = abi::subscription { + type_: abi::eventtype::CONDVAR, + union: abi::subscription_union { + condvar: abi::subscription_condvar { + condvar: condvar as *mut abi::condvar, + condvar_scope: abi::scope::PRIVATE, + lock: mutex as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event: abi::event = mem::uninitialized(); + let mut nevents: usize = mem::uninitialized(); + let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + assert_eq!( + ret, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + assert_eq!( + event.error, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let mutex = mutex::raw(mutex); + assert_eq!( + (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This lock is not write-locked by this thread" + ); + + // Call into the kernel to wait on the condition variable. 
+ let condvar = self.condvar.get(); + let subscriptions = [ + abi::subscription { + type_: abi::eventtype::CONDVAR, + union: abi::subscription_union { + condvar: abi::subscription_condvar { + condvar: condvar as *mut abi::condvar, + condvar_scope: abi::scope::PRIVATE, + lock: mutex as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }, + abi::subscription { + type_: abi::eventtype::CLOCK, + union: abi::subscription_union { + clock: abi::subscription_clock { + clock_id: abi::clockid::MONOTONIC, + timeout: dur2intervals(&dur), + ..mem::zeroed() + }, + }, + ..mem::zeroed() + }, + ]; + let mut events: [abi::event; 2] = mem::uninitialized(); + let mut nevents: usize = mem::uninitialized(); + let ret = abi::poll(subscriptions.as_ptr(), events.as_mut_ptr(), 2, &mut nevents); + assert_eq!( + ret, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + for i in 0..nevents { + assert_eq!( + events[i].error, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + if events[i].type_ == abi::eventtype::CONDVAR { + return true; + } + } + false + } + + pub unsafe fn destroy(&self) { + let condvar = self.condvar.get(); + assert_eq!( + (*condvar).load(Ordering::Relaxed), + abi::CONDVAR_HAS_NO_WAITERS.0, + "Attempted to destroy a condition variable with blocked threads" + ); + } +} diff --git a/src/libstd/sys/cloudabi/mod.rs b/src/libstd/sys/cloudabi/mod.rs new file mode 100644 index 0000000000000..06f45fafde780 --- /dev/null +++ b/src/libstd/sys/cloudabi/mod.rs @@ -0,0 +1,73 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
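Editor's note: the notify paths in condvar.rs above avoid a system call whenever the condition word still reads "no waiters". A hedged, self-contained sketch of that fast path, using a plain AtomicU32 and an illustrative HAS_NO_WAITERS constant in place of abi::CONDVAR_HAS_NO_WAITERS (the real code calls abi::condvar_signal when a wakeup is actually needed):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Illustrative stand-in for abi::CONDVAR_HAS_NO_WAITERS.0.
    const HAS_NO_WAITERS: u32 = 0;

    struct CondWord(AtomicU32);

    impl CondWord {
        fn new() -> CondWord {
            CondWord(AtomicU32::new(HAS_NO_WAITERS))
        }

        // True only when a notifier would actually have to call into the kernel;
        // with no recorded waiters, notify_one/notify_all can return immediately.
        fn needs_kernel_signal(&self) -> bool {
            self.0.load(Ordering::Relaxed) != HAS_NO_WAITERS
        }
    }

    fn main() {
        let c = CondWord::new();
        assert!(!c.needs_kernel_signal()); // fresh condvar: signalling is a no-op

        // A waiter registering itself flips the word away from HAS_NO_WAITERS,
        // which is what forces the next notify to go through the kernel.
        c.0.store(1, Ordering::Relaxed);
        assert!(c.needs_kernel_signal());
    }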
+ +use io; +use libc; +use mem; + +pub mod args; +#[cfg(feature = "backtrace")] +pub mod backtrace; +#[path = "../unix/cmath.rs"] +pub mod cmath; +pub mod condvar; +#[path = "../unix/memchr.rs"] +pub mod memchr; +pub mod mutex; +pub mod os; +#[path = "../unix/os_str.rs"] +pub mod os_str; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +#[path = "../unix/thread_local.rs"] +pub mod thread_local; +pub mod time; + +mod abi; + +#[allow(dead_code)] +pub fn init() {} + +pub fn decode_error_kind(errno: i32) -> io::ErrorKind { + match errno { + x if x == abi::errno::ACCES as i32 => io::ErrorKind::PermissionDenied, + x if x == abi::errno::ADDRINUSE as i32 => io::ErrorKind::AddrInUse, + x if x == abi::errno::ADDRNOTAVAIL as i32 => io::ErrorKind::AddrNotAvailable, + x if x == abi::errno::AGAIN as i32 => io::ErrorKind::WouldBlock, + x if x == abi::errno::CONNABORTED as i32 => io::ErrorKind::ConnectionAborted, + x if x == abi::errno::CONNREFUSED as i32 => io::ErrorKind::ConnectionRefused, + x if x == abi::errno::CONNRESET as i32 => io::ErrorKind::ConnectionReset, + x if x == abi::errno::EXIST as i32 => io::ErrorKind::AlreadyExists, + x if x == abi::errno::INTR as i32 => io::ErrorKind::Interrupted, + x if x == abi::errno::INVAL as i32 => io::ErrorKind::InvalidInput, + x if x == abi::errno::NOENT as i32 => io::ErrorKind::NotFound, + x if x == abi::errno::NOTCONN as i32 => io::ErrorKind::NotConnected, + x if x == abi::errno::PERM as i32 => io::ErrorKind::PermissionDenied, + x if x == abi::errno::PIPE as i32 => io::ErrorKind::BrokenPipe, + x if x == abi::errno::TIMEDOUT as i32 => io::ErrorKind::TimedOut, + _ => io::ErrorKind::Other, + } +} + +pub unsafe fn abort_internal() -> ! { + ::core::intrinsics::abort(); +} + +pub use libc::strlen; + +pub fn hashmap_random_keys() -> (u64, u64) { + unsafe { + let mut v = mem::uninitialized(); + libc::arc4random_buf(&mut v as *mut _ as *mut libc::c_void, mem::size_of_val(&v)); + v + } +} diff --git a/src/libstd/sys/cloudabi/mutex.rs b/src/libstd/sys/cloudabi/mutex.rs new file mode 100644 index 0000000000000..d4ba6bcfc8062 --- /dev/null +++ b/src/libstd/sys/cloudabi/mutex.rs @@ -0,0 +1,158 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cell::UnsafeCell; +use mem; +use sync::atomic::{AtomicU32, Ordering}; +use sys::cloudabi::abi; +use sys::rwlock::{self, RWLock}; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +// Implement Mutex using an RWLock. This doesn't introduce any +// performance overhead in this environment, as the operations would be +// implemented identically. +pub struct Mutex(RWLock); + +pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 { + rwlock::raw(&m.0) +} + +impl Mutex { + pub const fn new() -> Mutex { + Mutex(RWLock::new()) + } + + pub unsafe fn init(&mut self) { + // This function should normally reinitialize the mutex after + // moving it to a different memory address. This implementation + // does not require adjustments after moving. 
+ } + + pub unsafe fn try_lock(&self) -> bool { + self.0.try_write() + } + + pub unsafe fn lock(&self) { + self.0.write() + } + + pub unsafe fn unlock(&self) { + self.0.write_unlock() + } + + pub unsafe fn destroy(&self) { + self.0.destroy() + } +} + +pub struct ReentrantMutex { + lock: UnsafeCell, + recursion: UnsafeCell, +} + +impl ReentrantMutex { + pub unsafe fn uninitialized() -> ReentrantMutex { + mem::uninitialized() + } + + pub unsafe fn init(&mut self) { + self.lock = UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)); + self.recursion = UnsafeCell::new(0); + } + + pub unsafe fn try_lock(&self) -> bool { + // Attempt to acquire the lock. + let lock = self.lock.get(); + let recursion = self.recursion.get(); + if let Err(old) = (*lock).compare_exchange( + abi::LOCK_UNLOCKED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + // If we fail to acquire the lock, it may be the case + // that we've already acquired it and may need to recurse. + if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 { + *recursion += 1; + true + } else { + false + } + } else { + // Success. + assert_eq!(*recursion, 0, "Mutex has invalid recursion count"); + true + } + } + + pub unsafe fn lock(&self) { + if !self.try_lock() { + // Call into the kernel to acquire a write lock. + let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_WRLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event: abi::event = mem::uninitialized(); + let mut nevents: usize = mem::uninitialized(); + let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex"); + assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex"); + } + } + + pub unsafe fn unlock(&self) { + let lock = self.lock.get(); + let recursion = self.recursion.get(); + assert_eq!( + (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This mutex is locked by a different thread" + ); + + if *recursion > 0 { + *recursion -= 1; + } else if !(*lock) + .compare_exchange( + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + abi::LOCK_UNLOCKED.0, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + // Lock is managed by kernelspace. Call into the kernel + // to unblock waiting threads. + let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex"); + } + } + + pub unsafe fn destroy(&self) { + let lock = self.lock.get(); + let recursion = self.recursion.get(); + assert_eq!( + (*lock).load(Ordering::Relaxed), + abi::LOCK_UNLOCKED.0, + "Attempted to destroy locked mutex" + ); + assert_eq!(*recursion, 0, "Recursion counter invalid"); + } +} diff --git a/src/libstd/sys/cloudabi/os.rs b/src/libstd/sys/cloudabi/os.rs new file mode 100644 index 0000000000000..93f8161964572 --- /dev/null +++ b/src/libstd/sys/cloudabi/os.rs @@ -0,0 +1,31 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
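Editor's note: ReentrantMutex above stores the owning thread id in the lock word and keeps a separate recursion counter, so re-acquisition by the owner never touches the kernel. A simplified, hedged sketch of that bookkeeping; thread ids are passed in explicitly and the counter is an AtomicU32 for safety, whereas the real code uses __pthread_thread_id, UnsafeCell<u32> and the CloudABI LOCK_* bits:

    use std::sync::atomic::{AtomicU32, Ordering};

    const UNLOCKED: u32 = 0; // stand-in for abi::LOCK_UNLOCKED.0

    struct Reentrant {
        word: AtomicU32,      // 0 = unlocked, otherwise the owning thread id
        recursion: AtomicU32, // extra acquisitions by the owner
    }

    impl Reentrant {
        fn new() -> Reentrant {
            Reentrant { word: AtomicU32::new(UNLOCKED), recursion: AtomicU32::new(0) }
        }

        fn try_lock(&self, tid: u32) -> bool {
            match self.word.compare_exchange(UNLOCKED, tid, Ordering::Acquire, Ordering::Relaxed) {
                Ok(_) => true, // freshly acquired
                Err(owner) if owner == tid => {
                    // Already held by this thread: recurse instead of blocking.
                    self.recursion.fetch_add(1, Ordering::Relaxed);
                    true
                }
                Err(_) => false, // held by another thread
            }
        }

        fn unlock(&self, tid: u32) {
            assert_eq!(self.word.load(Ordering::Relaxed), tid, "not the owner");
            if self.recursion.load(Ordering::Relaxed) > 0 {
                self.recursion.fetch_sub(1, Ordering::Relaxed);
            } else {
                self.word.store(UNLOCKED, Ordering::Release);
            }
        }
    }

    fn main() {
        let m = Reentrant::new();
        assert!(m.try_lock(7));
        assert!(m.try_lock(7)); // same thread id recurses without blocking
        m.unlock(7);
        m.unlock(7); // final unlock releases the word
    }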
+ +use ffi::CStr; +use libc::{self, c_int}; +use str; + +pub fn errno() -> i32 { + extern "C" { + #[thread_local] + static errno: c_int; + } + + unsafe { errno as i32 } +} + +/// Gets a detailed string description for the given error number. +pub fn error_string(errno: i32) -> String { + // cloudlibc's strerror() is guaranteed to be thread-safe. There is + // thus no need to use strerror_r(). + str::from_utf8(unsafe { CStr::from_ptr(libc::strerror(errno)) }.to_bytes()) + .unwrap() + .to_owned() +} diff --git a/src/libstd/sys/cloudabi/rwlock.rs b/src/libstd/sys/cloudabi/rwlock.rs new file mode 100644 index 0000000000000..8539aec5e2c07 --- /dev/null +++ b/src/libstd/sys/cloudabi/rwlock.rs @@ -0,0 +1,237 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cell::UnsafeCell; +use mem; +use sync::atomic::{AtomicU32, Ordering}; +use sys::cloudabi::abi; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +#[thread_local] +static mut RDLOCKS_ACQUIRED: u32 = 0; + +pub struct RWLock { + lock: UnsafeCell, +} + +pub unsafe fn raw(r: &RWLock) -> *mut AtomicU32 { + r.lock.get() +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { + lock: UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)), + } + } + + pub unsafe fn try_read(&self) -> bool { + let lock = self.lock.get(); + let mut old = abi::LOCK_UNLOCKED.0; + while let Err(cur) = + (*lock).compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed) + { + if (cur & abi::LOCK_WRLOCKED.0) != 0 { + // Another thread already has a write lock. + assert_ne!( + old & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "Attempted to acquire a read lock while holding a write lock" + ); + return false; + } else if (old & abi::LOCK_KERNEL_MANAGED.0) != 0 && RDLOCKS_ACQUIRED == 0 { + // Lock has threads waiting for the lock. Only acquire + // the lock if we have already acquired read locks. In + // that case, it is justified to acquire this lock to + // prevent a deadlock. + return false; + } + old = cur; + } + + RDLOCKS_ACQUIRED += 1; + true + } + + pub unsafe fn read(&self) { + if !self.try_read() { + // Call into the kernel to acquire a read lock. + let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_RDLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event: abi::event = mem::uninitialized(); + let mut nevents: usize = mem::uninitialized(); + let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire read lock"); + assert_eq!( + event.error, + abi::errno::SUCCESS, + "Failed to acquire read lock" + ); + + RDLOCKS_ACQUIRED += 1; + } + } + + pub unsafe fn read_unlock(&self) { + // Perform a read unlock. We can do this in userspace, except when + // other threads are blocked and we are performing the last unlock. + // In that case, call into the kernel. 
+ // + // Other threads may attempt to increment the read lock count, + // meaning that the call into the kernel could be spurious. To + // prevent this from happening, upgrade to a write lock first. This + // allows us to call into the kernel, having the guarantee that the + // lock value will not change in the meantime. + assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count"); + let mut old = 1; + loop { + let lock = self.lock.get(); + if old == 1 | abi::LOCK_KERNEL_MANAGED.0 { + // Last read lock while threads are waiting. Attempt to upgrade + // to a write lock before calling into the kernel to unlock. + if let Err(cur) = (*lock).compare_exchange_weak( + old, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + old = cur; + } else { + // Call into the kernel to unlock. + let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock"); + break; + } + } else { + // No threads waiting or not the last read lock. Just decrement + // the read lock count. + assert_ne!( + old & !abi::LOCK_KERNEL_MANAGED.0, + 0, + "This rwlock is not locked" + ); + assert_eq!( + old & abi::LOCK_WRLOCKED.0, + 0, + "Attempted to read-unlock a write-locked rwlock" + ); + if let Err(cur) = (*lock).compare_exchange_weak( + old, + old - 1, + Ordering::Acquire, + Ordering::Relaxed, + ) { + old = cur; + } else { + break; + } + } + } + + RDLOCKS_ACQUIRED -= 1; + } + + pub unsafe fn try_write(&self) -> bool { + // Attempt to acquire the lock. + let lock = self.lock.get(); + if let Err(old) = (*lock).compare_exchange( + abi::LOCK_UNLOCKED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + // Failure. Crash upon recursive acquisition. + assert_ne!( + old & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "Attempted to recursive write-lock a rwlock", + ); + false + } else { + // Success. + true + } + } + + pub unsafe fn write(&self) { + if !self.try_write() { + // Call into the kernel to acquire a write lock. + let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_WRLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event: abi::event = mem::uninitialized(); + let mut nevents: usize = mem::uninitialized(); + let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire write lock"); + assert_eq!( + event.error, + abi::errno::SUCCESS, + "Failed to acquire write lock" + ); + } + } + + pub unsafe fn write_unlock(&self) { + let lock = self.lock.get(); + assert_eq!( + (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This rwlock is not write-locked by this thread" + ); + + if !(*lock) + .compare_exchange( + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + abi::LOCK_UNLOCKED.0, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + // Lock is managed by kernelspace. Call into the kernel + // to unblock waiting threads. 
+            let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+            assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        let lock = self.lock.get();
+        assert_eq!(
+            (*lock).load(Ordering::Relaxed),
+            abi::LOCK_UNLOCKED.0,
+            "Attempted to destroy locked rwlock"
+        );
+    }
+}
diff --git a/src/libstd/sys/cloudabi/stack_overflow.rs b/src/libstd/sys/cloudabi/stack_overflow.rs
new file mode 100644
index 0000000000000..5c0b1e5671e1b
--- /dev/null
+++ b/src/libstd/sys/cloudabi/stack_overflow.rs
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![cfg_attr(test, allow(dead_code))]
+
+pub struct Handler;
+
+impl Handler {
+    pub unsafe fn new() -> Handler {
+        Handler
+    }
+}
+
+pub unsafe fn init() {}
+
+pub unsafe fn cleanup() {}
diff --git a/src/libstd/sys/cloudabi/stdio.rs b/src/libstd/sys/cloudabi/stdio.rs
new file mode 100644
index 0000000000000..9519a92647108
--- /dev/null
+++ b/src/libstd/sys/cloudabi/stdio.rs
@@ -0,0 +1,79 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use io;
+use sys::cloudabi::abi;
+
+pub struct Stdin(());
+pub struct Stdout(());
+pub struct Stderr(());
+
+impl Stdin {
+    pub fn new() -> io::Result<Stdin> {
+        Ok(Stdin(()))
+    }
+
+    pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
+        Ok(0)
+    }
+}
+
+impl Stdout {
+    pub fn new() -> io::Result<Stdout> {
+        Ok(Stdout(()))
+    }
+
+    pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+        Err(io::Error::new(
+            io::ErrorKind::BrokenPipe,
+            "Stdout is not connected to any output in this environment",
+        ))
+    }
+
+    pub fn flush(&self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl Stderr {
+    pub fn new() -> io::Result<Stderr> {
+        Ok(Stderr(()))
+    }
+
+    pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+        Err(io::Error::new(
+            io::ErrorKind::BrokenPipe,
+            "Stderr is not connected to any output in this environment",
+        ))
+    }
+
+    pub fn flush(&self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+// FIXME: right now this raw stderr handle is used in a few places because
+// std::io::stderr_raw isn't exposed, but once that's exposed this impl
+// should go away
+impl io::Write for Stderr {
+    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+        Stderr::write(self, data)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Stderr::flush(self)
+    }
+}
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+    err.raw_os_error() == Some(abi::errno::BADF as i32)
+}
+
+pub const STDIN_BUF_SIZE: usize = ::sys_common::io::DEFAULT_BUF_SIZE;
diff --git a/src/libstd/sys/cloudabi/thread.rs b/src/libstd/sys/cloudabi/thread.rs
new file mode 100644
index 0000000000000..c980ae75261ca
--- /dev/null
+++ b/src/libstd/sys/cloudabi/thread.rs
@@ -0,0 +1,124 @@
+// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use alloc::boxed::FnBox;
+use cmp;
+use ffi::CStr;
+use io;
+use libc;
+use mem;
+use ptr;
+use sys::cloudabi::abi;
+use sys::time::dur2intervals;
+use sys_common::thread::*;
+use time::Duration;
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
+
+pub struct Thread {
+    id: libc::pthread_t,
+}
+
+// CloudABI has pthread_t as a pointer in which case we still want
+// a thread to be Send/Sync
+unsafe impl Send for Thread {}
+unsafe impl Sync for Thread {}
+
+impl Thread {
+    pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>) -> io::Result<Thread> {
+        let p = box p;
+        let mut native: libc::pthread_t = mem::zeroed();
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+
+        let stack_size = cmp::max(stack, min_stack_size(&attr));
+        assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
+
+        let ret = libc::pthread_create(&mut native, &attr, thread_start, &*p as *const _ as *mut _);
+        assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+
+        return if ret != 0 {
+            Err(io::Error::from_raw_os_error(ret))
+        } else {
+            mem::forget(p); // ownership passed to pthread_create
+            Ok(Thread { id: native })
+        };
+
+        extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
+            unsafe {
+                start_thread(main as *mut u8);
+            }
+            ptr::null_mut()
+        }
+    }
+
+    pub fn yield_now() {
+        let ret = unsafe { abi::thread_yield() };
+        debug_assert_eq!(ret, abi::errno::SUCCESS);
+    }
+
+    pub fn set_name(_name: &CStr) {
+        // CloudABI has no way to set a thread name.
+    }
+
+    pub fn sleep(dur: Duration) {
+        unsafe {
+            let subscription = abi::subscription {
+                type_: abi::eventtype::CLOCK,
+                union: abi::subscription_union {
+                    clock: abi::subscription_clock {
+                        clock_id: abi::clockid::MONOTONIC,
+                        timeout: dur2intervals(&dur),
+                        ..mem::zeroed()
+                    },
+                },
+                ..mem::zeroed()
+            };
+            let mut event: abi::event = mem::uninitialized();
+            let mut nevents: usize = mem::uninitialized();
+            let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
+            assert_eq!(ret, abi::errno::SUCCESS);
+            assert_eq!(event.error, abi::errno::SUCCESS);
+        }
+    }
+
+    pub fn join(self) {
+        unsafe {
+            let ret = libc::pthread_join(self.id, ptr::null_mut());
+            mem::forget(self);
+            assert!(
+                ret == 0,
+                "failed to join thread: {}",
+                io::Error::from_raw_os_error(ret)
+            );
+        }
+    }
+}
+
+impl Drop for Thread {
+    fn drop(&mut self) {
+        let ret = unsafe { libc::pthread_detach(self.id) };
+        debug_assert_eq!(ret, 0);
+    }
+}
+
+#[cfg_attr(test, allow(dead_code))]
+pub mod guard {
+    pub unsafe fn current() -> Option<usize> {
+        None
+    }
+    pub unsafe fn init() -> Option<usize> {
+        None
+    }
+}
+
+fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
+    libc::PTHREAD_STACK_MIN
+}
diff --git a/src/libstd/sys/cloudabi/time.rs b/src/libstd/sys/cloudabi/time.rs
new file mode 100644
index 0000000000000..ee12731619aac
--- /dev/null
+++ b/src/libstd/sys/cloudabi/time.rs
@@ -0,0 +1,111 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
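Editor's note: both Thread::sleep above and the timeout handling in time.rs below funnel through dur2intervals, which turns a Duration into a CloudABI timestamp measured in nanoseconds. A standalone sketch of the same conversion, with Timestamp standing in for abi::timestamp:

    use std::time::Duration;

    type Timestamp = u64; // stand-in for abi::timestamp (nanoseconds)
    const NSEC_PER_SEC: Timestamp = 1_000_000_000;

    // Convert a Duration to whole nanoseconds, panicking on overflow just like
    // dur2intervals does in the patch.
    fn dur_to_nanos(dur: &Duration) -> Timestamp {
        dur.as_secs()
            .checked_mul(NSEC_PER_SEC)
            .and_then(|nanos| nanos.checked_add(dur.subsec_nanos() as Timestamp))
            .expect("overflow converting duration to nanoseconds")
    }

    fn main() {
        assert_eq!(dur_to_nanos(&Duration::new(2, 500)), 2_000_000_500);
    }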
+ +use mem; +use sys::cloudabi::abi; +use time::Duration; + +const NSEC_PER_SEC: abi::timestamp = 1_000_000_000; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant { + t: abi::timestamp, +} + +pub fn dur2intervals(dur: &Duration) -> abi::timestamp { + dur.as_secs() + .checked_mul(NSEC_PER_SEC) + .and_then(|nanos| nanos.checked_add(dur.subsec_nanos() as abi::timestamp)) + .expect("overflow converting duration to nanoseconds") +} + +impl Instant { + pub fn now() -> Instant { + unsafe { + let mut t = mem::uninitialized(); + let ret = abi::clock_time_get(abi::clockid::MONOTONIC, 0, &mut t); + assert_eq!(ret, abi::errno::SUCCESS); + Instant { t: t } + } + } + + pub fn sub_instant(&self, other: &Instant) -> Duration { + let diff = self.t + .checked_sub(other.t) + .expect("second instant is later than self"); + Duration::new(diff / NSEC_PER_SEC, (diff % NSEC_PER_SEC) as u32) + } + + pub fn add_duration(&self, other: &Duration) -> Instant { + Instant { + t: self.t + .checked_add(dur2intervals(other)) + .expect("overflow when adding duration to instant"), + } + } + + pub fn sub_duration(&self, other: &Duration) -> Instant { + Instant { + t: self.t + .checked_sub(dur2intervals(other)) + .expect("overflow when subtracting duration from instant"), + } + } +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime { + t: abi::timestamp, +} + +impl SystemTime { + pub fn now() -> SystemTime { + unsafe { + let mut t = mem::uninitialized(); + let ret = abi::clock_time_get(abi::clockid::REALTIME, 0, &mut t); + assert_eq!(ret, abi::errno::SUCCESS); + SystemTime { t: t } + } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result { + if self.t >= other.t { + let diff = self.t - other.t; + Ok(Duration::new( + diff / NSEC_PER_SEC, + (diff % NSEC_PER_SEC) as u32, + )) + } else { + let diff = other.t - self.t; + Err(Duration::new( + diff / NSEC_PER_SEC, + (diff % NSEC_PER_SEC) as u32, + )) + } + } + + pub fn add_duration(&self, other: &Duration) -> SystemTime { + SystemTime { + t: self.t + .checked_add(dur2intervals(other)) + .expect("overflow when adding duration to instant"), + } + } + + pub fn sub_duration(&self, other: &Duration) -> SystemTime { + SystemTime { + t: self.t + .checked_sub(dur2intervals(other)) + .expect("overflow when subtracting duration from instant"), + } + } +} + +pub const UNIX_EPOCH: SystemTime = SystemTime { t: 0 }; diff --git a/src/libstd/sys/mod.rs b/src/libstd/sys/mod.rs index be8cb88416bb6..a853d8ea69dc7 100644 --- a/src/libstd/sys/mod.rs +++ b/src/libstd/sys/mod.rs @@ -39,6 +39,9 @@ cfg_if! { } else if #[cfg(windows)] { mod windows; pub use self::windows::*; + } else if #[cfg(target_os = "cloudabi")] { + mod cloudabi; + pub use self::cloudabi::*; } else if #[cfg(target_os = "redox")] { mod redox; pub use self::redox::*; diff --git a/src/libstd/sys_common/mod.rs b/src/libstd/sys_common/mod.rs index 938e865680806..ee71e62e4ca12 100644 --- a/src/libstd/sys_common/mod.rs +++ b/src/libstd/sys_common/mod.rs @@ -43,6 +43,7 @@ pub mod thread_local; pub mod util; pub mod wtf8; pub mod bytestring; +#[cfg(not(target_os = "cloudabi"))] pub mod process; cfg_if! { @@ -50,6 +51,8 @@ cfg_if! { pub use sys::net; } else if #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] { pub use sys::net; + } else if #[cfg(target_os = "cloudabi")] { + // No networking support on CloudABI (yet). } else { pub mod net; }
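Editor's note: downstream code can mirror the same target gate this patch applies to env, fs, net, path and process: items that CloudABI cannot support are compiled out rather than stubbed. A hedged sketch of that pattern; the read_config function and the config.toml path are illustrative, not part of the patch:

    #[cfg(not(target_os = "cloudabi"))]
    fn read_config() -> std::io::Result<String> {
        // Ordinary targets can reach the filesystem through std::fs.
        std::fs::read_to_string("config.toml")
    }

    #[cfg(target_os = "cloudabi")]
    fn read_config() -> std::io::Result<String> {
        // std::fs is compiled out on CloudABI, so fall back to a built-in default
        // (a real program would use a capability handed to it at startup).
        Ok(String::new())
    }

    fn main() {
        let _ = read_config();
    }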