From fc4cdbf4f5c8295f5111118d7f500ce1eae6b71c Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 17 Sep 2016 05:17:28 +0100
Subject: [PATCH] Clean up the stack implementations

close #54
---
 src/lib.rs                     | 18 +--------
 src/slice_stack.rs             | 25 ------------
 src/{stack.rs => stack/mod.rs} | 15 ++++++-
 src/{ => stack}/os/mod.rs      | 28 +++++++------
 src/{ => stack}/os/sys.rs      |  0
 src/{ => stack}/owned_stack.rs | 10 +++--
 src/stack/slice_stack.rs       | 44 ++++++++++++++++++++
 tests/generator.rs             |  2 +-
 tests/stack.rs                 | 73 +++++++++++++++++++++++++++++++---
 9 files changed, 149 insertions(+), 66 deletions(-)
 delete mode 100644 src/slice_stack.rs
 rename src/{stack.rs => stack/mod.rs} (86%)
 rename src/{ => stack}/os/mod.rs (78%)
 rename src/{ => stack}/os/sys.rs (100%)
 rename src/{ => stack}/owned_stack.rs (80%)
 create mode 100644 src/stack/slice_stack.rs

diff --git a/src/lib.rs b/src/lib.rs
index d111301..f9764dd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -33,17 +33,9 @@
 #[macro_use]
 extern crate std;
 
-pub use stack::Stack;
-pub use stack::GuardedStack;
-pub use slice_stack::SliceStack;
+pub use stack::*;
 pub use generator::Generator;
 
-#[cfg(feature = "alloc")]
-pub use owned_stack::OwnedStack;
-
-#[cfg(unix)]
-pub use os::Stack as OsStack;
-
 mod arch;
 
 /// Minimum alignment of a stack base address on the target platform.
@@ -51,12 +43,6 @@ pub const STACK_ALIGNMENT: usize = arch::STACK_ALIGNMENT;
 
 mod debug;
 
-mod stack;
-mod slice_stack;
 pub mod generator;
 
-#[cfg(feature = "alloc")]
-mod owned_stack;
-
-#[cfg(unix)]
-mod os;
+mod stack;
diff --git a/src/slice_stack.rs b/src/slice_stack.rs
deleted file mode 100644
index 0af9502..0000000
--- a/src/slice_stack.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-// This file is part of libfringe, a low-level green threading library.
-// Copyright (c) whitequark
-// See the LICENSE file included in this distribution.
-
-/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
-///
-/// Any slice used in a SliceStack must adhere to the [Stack contract][contract].
-/// [contract]: trait.Stack.html
-#[derive(Debug)]
-pub struct SliceStack<'a>(pub &'a mut [u8]);
-
-impl<'a> ::stack::Stack for SliceStack<'a> {
-  #[inline(always)]
-  fn base(&self) -> *mut u8 {
-    // The slice cannot wrap around the address space, so the conversion from usize
-    // to isize will not wrap either.
-    let len: isize = self.0.len() as isize;
-    unsafe { self.limit().offset(len) }
-  }
-
-  #[inline(always)]
-  fn limit(&self) -> *mut u8 {
-    self.0.as_ptr() as *mut u8
-  }
-}
diff --git a/src/stack.rs b/src/stack/mod.rs
similarity index 86%
rename from src/stack.rs
rename to src/stack/mod.rs
index 39afc39..b0c7fac 100644
--- a/src/stack.rs
+++ b/src/stack/mod.rs
@@ -6,6 +6,19 @@
 // copied, modified, or distributed except according to those terms.
 //! Traits for stacks.
 
+mod slice_stack;
+pub use stack::slice_stack::SliceStack;
+
+#[cfg(feature = "alloc")]
+mod owned_stack;
+#[cfg(feature = "alloc")]
+pub use stack::owned_stack::OwnedStack;
+
+#[cfg(unix)]
+mod os;
+#[cfg(unix)]
+pub use stack::os::OsStack;
+
 /// A trait for objects that hold ownership of a stack.
 ///
 /// To preserve memory safety, an implementation of this trait must fulfill
@@ -16,7 +29,7 @@
 /// * Every address between the base and the limit must be readable and writable.
 ///
 /// [align]: constant.STACK_ALIGNMENT.html
-pub trait Stack {
+pub unsafe trait Stack {
   /// Returns the base address of the stack.
   /// On all modern architectures, the stack grows downwards,
   /// so this is the highest address.
diff --git a/src/os/mod.rs b/src/stack/os/mod.rs
similarity index 78%
rename from src/os/mod.rs
rename to src/stack/os/mod.rs
index 4fd624e..7c3fdd6 100644
--- a/src/os/mod.rs
+++ b/src/stack/os/mod.rs
@@ -5,27 +5,28 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 extern crate std;
+
 use self::std::io::Error as IoError;
-use stack;
+use stack::{Stack, GuardedStack};
 
 mod sys;
 
 /// OsStack holds a guarded stack allocated using the operating system's anonymous
 /// memory mapping facility.
 #[derive(Debug)]
-pub struct Stack {
+pub struct OsStack {
   ptr: *mut u8,
-  len: usize
+  len: usize,
 }
 
-unsafe impl Send for Stack {}
+unsafe impl Send for OsStack {}
 
-impl Stack {
+impl OsStack {
   /// Allocates a new stack with at least `size` accessible bytes.
-  /// `size` is rounded up to an integral number of pages; `Stack::new(0)` is legal
+  /// `size` is rounded up to an integral number of pages; `OsStack::new(0)` is legal
   /// and allocates the smallest possible stack, consisting of one data page and
   /// one guard page.
-  pub fn new(size: usize) -> Result<Stack, IoError> {
+  pub fn new(size: usize) -> Result<OsStack, IoError> {
     let page_size = sys::page_size();
 
     // Stacks have to be at least one page long.
@@ -39,9 +40,10 @@ impl Stack {
     let len = len + page_size;
 
     // Allocate a stack.
-    let stack = Stack {
-      ptr: try!(unsafe { sys::map_stack(len) }),
-      len: len
+    let ptr = try!(unsafe { sys::map_stack(len) });
+    let stack = OsStack {
+      ptr: ptr,
+      len: len,
     };
 
     // Mark the guard page. If this fails, `stack` will be dropped,
@@ -52,7 +54,7 @@ impl Stack {
   }
 }
 
-impl stack::Stack for Stack {
+unsafe impl Stack for OsStack {
   #[inline(always)]
   fn base(&self) -> *mut u8 {
     unsafe {
@@ -68,9 +70,9 @@ impl stack::Stack for Stack {
   }
 }
 
-unsafe impl stack::GuardedStack for Stack {}
+unsafe impl GuardedStack for OsStack {}
 
-impl Drop for Stack {
+impl Drop for OsStack {
   fn drop(&mut self) {
     unsafe { sys::unmap_stack(self.ptr, self.len) }.expect("cannot unmap stack")
   }
diff --git a/src/os/sys.rs b/src/stack/os/sys.rs
similarity index 100%
rename from src/os/sys.rs
rename to src/stack/os/sys.rs
diff --git a/src/owned_stack.rs b/src/stack/owned_stack.rs
similarity index 80%
rename from src/owned_stack.rs
rename to src/stack/owned_stack.rs
index e914690..42b9791 100644
--- a/src/owned_stack.rs
+++ b/src/stack/owned_stack.rs
@@ -6,6 +6,7 @@ extern crate alloc;
 use core::slice;
 use self::alloc::heap;
 use self::alloc::boxed::Box;
+use stack::Stack;
 
 /// OwnedStack holds a non-guarded, heap-allocated stack.
 #[derive(Debug)]
@@ -16,18 +17,19 @@ impl OwnedStack {
   /// Allocates a new stack with exactly `size` accessible bytes and aligned
   /// for the current platform using the default Rust allocator.
   pub fn new(size: usize) -> OwnedStack {
     unsafe {
-      let ptr = heap::allocate(size, ::STACK_ALIGNMENT);
-      OwnedStack(Box::from_raw(slice::from_raw_parts_mut(ptr, size)))
+      let aligned_size = size & !(::STACK_ALIGNMENT - 1);
+      let ptr = heap::allocate(aligned_size, ::STACK_ALIGNMENT);
+      OwnedStack(Box::from_raw(slice::from_raw_parts_mut(ptr, aligned_size)))
     }
   }
 }
 
-impl ::stack::Stack for OwnedStack {
+unsafe impl Stack for OwnedStack {
   #[inline(always)]
   fn base(&self) -> *mut u8 {
     // The slice cannot wrap around the address space, so the conversion from usize
     // to isize will not wrap either.
-    let len: isize = self.0.len() as isize;
+    let len = self.0.len() as isize;
     unsafe { self.limit().offset(len) }
   }
diff --git a/src/stack/slice_stack.rs b/src/stack/slice_stack.rs
new file mode 100644
index 0000000..9b707a1
--- /dev/null
+++ b/src/stack/slice_stack.rs
@@ -0,0 +1,44 @@
+// This file is part of libfringe, a low-level green threading library.
+// Copyright (c) whitequark
+// See the LICENSE file included in this distribution.
+
+use stack::Stack;
+
+/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
+#[derive(Debug)]
+pub struct SliceStack<'a>(&'a mut [u8]);
+
+impl<'a> SliceStack<'a> {
+  /// Creates a `SliceStack` from an existing slice.
+  ///
+  /// This function will automatically align the slice to make it suitable for
+  /// use as a stack. However this function may panic if the slice is smaller
+  /// than `STACK_ALIGNMENT`.
+  pub fn new(slice: &'a mut [u8]) -> SliceStack<'a> {
+    // Align the given slice so that it matches platform requirements
+    let ptr = slice.as_ptr() as usize;
+    let adjusted_ptr = (ptr + ::STACK_ALIGNMENT - 1) & !(::STACK_ALIGNMENT - 1);
+    let offset = adjusted_ptr - ptr;
+    if offset > slice.len() {
+      panic!("SliceStack too small");
+    }
+
+    let adjusted_len = (slice.len() - offset) & !(::STACK_ALIGNMENT - 1);
+    SliceStack(&mut slice[offset..(offset + adjusted_len)])
+  }
+}
+
+unsafe impl<'a> Stack for SliceStack<'a> {
+  #[inline(always)]
+  fn base(&self) -> *mut u8 {
+    // The slice cannot wrap around the address space, so the conversion from usize
+    // to isize will not wrap either.
+    let len = self.0.len() as isize;
+    unsafe { self.limit().offset(len) }
+  }
+
+  #[inline(always)]
+  fn limit(&self) -> *mut u8 {
+    self.0.as_ptr() as *mut u8
+  }
+}
diff --git a/tests/generator.rs b/tests/generator.rs
index c4f0a59..0d45c0e 100644
--- a/tests/generator.rs
+++ b/tests/generator.rs
@@ -69,7 +69,7 @@ fn panic_safety() {
 #[test]
 fn with_slice_stack() {
   let mut memory = [0; 1024];
-  let stack = SliceStack(&mut memory);
+  let stack = SliceStack::new(&mut memory);
   let mut add_one = unsafe { Generator::unsafe_new(stack, add_one_fn) };
   assert_eq!(add_one.resume(1), Some(2));
   assert_eq!(add_one.resume(2), Some(3));
diff --git a/tests/stack.rs b/tests/stack.rs
index 79c7095..f8ad4d4 100644
--- a/tests/stack.rs
+++ b/tests/stack.rs
@@ -4,26 +4,84 @@
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
+#![feature(alloc, heap_api)]
+
+extern crate alloc;
 extern crate fringe;
 
-use fringe::{Stack, SliceStack, OwnedStack, OsStack};
+use alloc::heap;
+use alloc::boxed::Box;
+use std::slice;
+use fringe::{STACK_ALIGNMENT, Stack, SliceStack, OwnedStack, OsStack};
+
+#[test]
+fn slice_aligned() {
+  unsafe {
+    let ptr = heap::allocate(16384, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, 16384));
+    let stack = SliceStack::new(&mut slice[4096..8192]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+
+#[test]
+fn slice_unaligned() {
+  unsafe {
+    let ptr = heap::allocate(16384, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, 16384));
+    let stack = SliceStack::new(&mut slice[4097..8193]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+
+#[test]
+fn slice_too_small() {
+  unsafe {
+    let ptr = heap::allocate(STACK_ALIGNMENT, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, STACK_ALIGNMENT));
+    let stack = SliceStack::new(&mut slice[0..1]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+
+#[test]
+#[should_panic="SliceStack too small"]
+fn slice_too_small_unaligned() {
+  unsafe {
+    let ptr = heap::allocate(STACK_ALIGNMENT, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, STACK_ALIGNMENT));
+    SliceStack::new(&mut slice[1..2]);
+  }
+}
 
 #[test]
 fn slice_stack() {
-  let mut memory = [0; 1024];
-  let stack = SliceStack(&mut memory);
-  assert_eq!(stack.base() as isize - stack.limit() as isize, 1024);
+  let mut memory = [0; 1024];
+  let stack = SliceStack::new(&mut memory);
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+
+  // Size may be a bit smaller due to alignment
+  assert!(stack.base() as usize - stack.limit() as usize > 1024 - STACK_ALIGNMENT * 2);
 }
 
 #[test]
 fn owned_stack() {
-  let stack = OwnedStack::new(1024);
-  assert_eq!(stack.base() as isize - stack.limit() as isize, 1024);
+  let stack = OwnedStack::new(1024);
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.base() as usize - stack.limit() as usize, 1024);
 }
 
 #[test]
 fn default_os_stack() {
   let stack = OsStack::new(0).unwrap();
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+
   // Make sure the topmost page of the stack, at least, is accessible.
   unsafe { *(stack.base().offset(-1)) = 0; }
 }
@@ -31,6 +89,9 @@
 #[test]
 fn one_page_os_stack() {
   let stack = OsStack::new(4096).unwrap();
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+
   // Make sure the topmost page of the stack, at least, is accessible.
   unsafe { *(stack.base().offset(-1)) = 0; }
 }
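
A rough usage sketch of the public API after this change; it is illustrative only and not part of the diff. All stack types are still reachable from the crate root through the new `pub use stack::*` re-export, so existing `fringe::SliceStack` / `fringe::OwnedStack` / `fringe::OsStack` imports keep working. The sizes and assertions below are arbitrary; `OwnedStack` needs the "alloc" feature and `OsStack` is Unix-only.

    extern crate fringe;

    use fringe::{Stack, SliceStack, OwnedStack, OsStack, STACK_ALIGNMENT};

    fn main() {
      // SliceStack::new now aligns the borrowed slice itself instead of
      // trusting the caller to pass an already-aligned slice.
      let mut memory = [0; 1024];
      let slice_stack = SliceStack::new(&mut memory);
      assert_eq!(slice_stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);

      // OwnedStack and OsStack implement the same (now unsafe) Stack trait;
      // OsStack additionally implements GuardedStack because of its guard page.
      let owned_stack = OwnedStack::new(4096);
      let os_stack = OsStack::new(0).expect("failed to map an OS stack");
      assert!(owned_stack.limit() <= owned_stack.base());
      assert!(os_stack.limit() <= os_stack.base());
    }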