
Clean up the stack implementations

close #54
Amanieu d'Antras 2016-09-17 05:17:28 +01:00 committed by edef
parent 1783c25c46
commit fc4cdbf4f5
9 changed files with 149 additions and 66 deletions

src/lib.rs

@@ -33,17 +33,9 @@
 #[macro_use]
 extern crate std;
-pub use stack::Stack;
-pub use stack::GuardedStack;
-pub use slice_stack::SliceStack;
+pub use stack::*;
 pub use generator::Generator;
-#[cfg(feature = "alloc")]
-pub use owned_stack::OwnedStack;
-#[cfg(unix)]
-pub use os::Stack as OsStack;
 mod arch;
 /// Minimum alignment of a stack base address on the target platform.
@@ -51,12 +43,6 @@ pub const STACK_ALIGNMENT: usize = arch::STACK_ALIGNMENT;
 mod debug;
-mod stack;
-mod slice_stack;
 pub mod generator;
-#[cfg(feature = "alloc")]
-mod owned_stack;
-#[cfg(unix)]
-mod os;
+mod stack;

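The re-export cleanup above means downstream code keeps importing the stack types from the crate root; only the paths behind the glob changed. A minimal sketch of imports that still compile after this commit, assuming the crate is consumed as `fringe`:

extern crate fringe;

// All stack types now come through `pub use stack::*;` at the crate root,
// so pre-existing imports like these are unaffected by the module moves.
#[allow(unused_imports)]
use fringe::{Stack, GuardedStack, SliceStack, OwnedStack, OsStack};

fn main() {}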
src/slice_stack.rs (deleted file)

@@ -1,25 +0,0 @@
-// This file is part of libfringe, a low-level green threading library.
-// Copyright (c) whitequark <whitequark@whitequark.org>
-// See the LICENSE file included in this distribution.
-/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
-///
-/// Any slice used in a SliceStack must adhere to the [Stack contract][contract].
-/// [contract]: trait.Stack.html
-#[derive(Debug)]
-pub struct SliceStack<'a>(pub &'a mut [u8]);
-impl<'a> ::stack::Stack for SliceStack<'a> {
-  #[inline(always)]
-  fn base(&self) -> *mut u8 {
-    // The slice cannot wrap around the address space, so the conversion from usize
-    // to isize will not wrap either.
-    let len: isize = self.0.len() as isize;
-    unsafe { self.limit().offset(len) }
-  }
-  #[inline(always)]
-  fn limit(&self) -> *mut u8 {
-    self.0.as_ptr() as *mut u8
-  }
-}

src/stack.rs → src/stack/mod.rs

@@ -6,6 +6,19 @@
 // copied, modified, or distributed except according to those terms.
 //! Traits for stacks.
+mod slice_stack;
+pub use stack::slice_stack::SliceStack;
+#[cfg(feature = "alloc")]
+mod owned_stack;
+#[cfg(feature = "alloc")]
+pub use stack::owned_stack::OwnedStack;
+#[cfg(unix)]
+mod os;
+#[cfg(unix)]
+pub use stack::os::OsStack;
 /// A trait for objects that hold ownership of a stack.
 ///
 /// To preserve memory safety, an implementation of this trait must fulfill
@@ -16,7 +29,7 @@
 /// * Every address between the base and the limit must be readable and writable.
 ///
 /// [align]: constant.STACK_ALIGNMENT.html
-pub trait Stack {
+pub unsafe trait Stack {
   /// Returns the base address of the stack.
   /// On all modern architectures, the stack grows downwards,
   /// so this is the highest address.

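Marking `Stack` as an `unsafe trait` moves the burden of the contract (aligned base, readable and writable range) from the trait's users onto its implementors. A minimal sketch of what a custom implementation now looks like; the `PageStack` type below is hypothetical, not part of the commit:

extern crate fringe;

use fringe::Stack;

// Hypothetical stack backed by a raw allocation. The implementor, not the
// compiler, must guarantee the contract: `base` aligned to STACK_ALIGNMENT
// and every byte between limit and base readable and writable.
struct PageStack {
    ptr: *mut u8, // assumed to come from a page-aligned allocation
    len: usize,   // assumed to be a multiple of STACK_ALIGNMENT
}

// The `unsafe` keyword here is the change above: implementing the trait
// is now itself a promise that the contract holds.
unsafe impl Stack for PageStack {
    fn base(&self) -> *mut u8 {
        // Stacks grow downwards, so the base is the high end of the range.
        unsafe { self.ptr.offset(self.len as isize) }
    }
    fn limit(&self) -> *mut u8 {
        self.ptr
    }
}

fn main() {}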
src/os/mod.rs → src/stack/os/mod.rs

@@ -5,27 +5,28 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 extern crate std;
 use self::std::io::Error as IoError;
-use stack;
+use stack::{Stack, GuardedStack};
 mod sys;
 /// OsStack holds a guarded stack allocated using the operating system's anonymous
 /// memory mapping facility.
 #[derive(Debug)]
-pub struct Stack {
+pub struct OsStack {
   ptr: *mut u8,
-  len: usize
+  len: usize,
 }
-unsafe impl Send for Stack {}
+unsafe impl Send for OsStack {}
-impl Stack {
+impl OsStack {
   /// Allocates a new stack with at least `size` accessible bytes.
-  /// `size` is rounded up to an integral number of pages; `Stack::new(0)` is legal
+  /// `size` is rounded up to an integral number of pages; `OsStack::new(0)` is legal
   /// and allocates the smallest possible stack, consisting of one data page and
   /// one guard page.
-  pub fn new(size: usize) -> Result<Stack, IoError> {
+  pub fn new(size: usize) -> Result<OsStack, IoError> {
     let page_size = sys::page_size();
     // Stacks have to be at least one page long.
@@ -39,9 +40,10 @@ impl Stack {
     let len = len + page_size;
     // Allocate a stack.
-    let stack = Stack {
-      ptr: try!(unsafe { sys::map_stack(len) }),
-      len: len
+    let ptr = try!(unsafe { sys::map_stack(len) });
+    let stack = OsStack {
+      ptr: ptr,
+      len: len,
     };
     // Mark the guard page. If this fails, `stack` will be dropped,
@@ -52,7 +54,7 @@ impl Stack {
   }
 }
-impl stack::Stack for Stack {
+unsafe impl Stack for OsStack {
   #[inline(always)]
   fn base(&self) -> *mut u8 {
     unsafe {
@@ -68,9 +70,9 @@ impl stack::Stack for Stack {
   }
 }
-unsafe impl stack::GuardedStack for Stack {}
+unsafe impl GuardedStack for OsStack {}
-impl Drop for Stack {
+impl Drop for OsStack {
   fn drop(&mut self) {
     unsafe { sys::unmap_stack(self.ptr, self.len) }.expect("cannot unmap stack")
   }

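After the rename, the OS-backed stack is constructed under its own name instead of through the old `os::Stack as OsStack` alias. A minimal usage sketch, assuming the re-exports shown in the stack module above:

extern crate fringe;

use fringe::{Stack, OsStack};

fn main() {
    // `new(0)` is legal: it allocates the smallest possible stack,
    // one data page plus one guard page, as documented above.
    let stack = OsStack::new(0).expect("failed to map stack");
    // The base sits above the limit because the stack grows downwards.
    assert!(stack.base() as usize > stack.limit() as usize);
}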
src/owned_stack.rs → src/stack/owned_stack.rs

@@ -6,6 +6,7 @@ extern crate alloc;
 use core::slice;
 use self::alloc::heap;
 use self::alloc::boxed::Box;
+use stack::Stack;
 /// OwnedStack holds a non-guarded, heap-allocated stack.
 #[derive(Debug)]
@@ -16,18 +17,19 @@ impl OwnedStack {
   /// for the current platform using the default Rust allocator.
   pub fn new(size: usize) -> OwnedStack {
     unsafe {
-      let ptr = heap::allocate(size, ::STACK_ALIGNMENT);
-      OwnedStack(Box::from_raw(slice::from_raw_parts_mut(ptr, size)))
+      let aligned_size = size & !(::STACK_ALIGNMENT - 1);
+      let ptr = heap::allocate(aligned_size, ::STACK_ALIGNMENT);
+      OwnedStack(Box::from_raw(slice::from_raw_parts_mut(ptr, aligned_size)))
     }
   }
 }
-impl ::stack::Stack for OwnedStack {
+unsafe impl Stack for OwnedStack {
   #[inline(always)]
   fn base(&self) -> *mut u8 {
     // The slice cannot wrap around the address space, so the conversion from usize
     // to isize will not wrap either.
-    let len: isize = self.0.len() as isize;
+    let len = self.0.len() as isize;
     unsafe { self.limit().offset(len) }
   }

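The new `aligned_size` line rounds the requested size down to a whole multiple of `STACK_ALIGNMENT` before allocating. A worked sketch of the mask arithmetic, assuming a 16-byte alignment (the real constant is platform-dependent):

fn main() {
    let alignment = 16usize; // stand-in for ::STACK_ALIGNMENT

    // `size & !(alignment - 1)` clears the low bits, rounding down.
    assert_eq!(1000 & !(alignment - 1), 992);  // 1000 -> 62 * 16
    assert_eq!(1024 & !(alignment - 1), 1024); // already aligned: unchanged
    assert_eq!(15 & !(alignment - 1), 0);      // below one unit rounds to zero
}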
src/stack/slice_stack.rs (new file)

@@ -0,0 +1,44 @@
+// This file is part of libfringe, a low-level green threading library.
+// Copyright (c) whitequark <whitequark@whitequark.org>
+// See the LICENSE file included in this distribution.
+use stack::Stack;
+/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
+#[derive(Debug)]
+pub struct SliceStack<'a>(&'a mut [u8]);
+impl<'a> SliceStack<'a> {
+  /// Creates a `SliceStack` from an existing slice.
+  ///
+  /// This function will automatically align the slice to make it suitable
+  /// for use as a stack. However, this function may panic if the slice is
+  /// smaller than `STACK_ALIGNMENT`.
+  pub fn new(slice: &'a mut [u8]) -> SliceStack<'a> {
+    // Align the given slice so that it matches platform requirements
+    let ptr = slice.as_ptr() as usize;
+    let adjusted_ptr = (ptr + ::STACK_ALIGNMENT - 1) & !(::STACK_ALIGNMENT - 1);
+    let offset = adjusted_ptr - ptr;
+    if offset > slice.len() {
+      panic!("SliceStack too small");
+    }
+    let adjusted_len = (slice.len() - offset) & !(::STACK_ALIGNMENT - 1);
+    SliceStack(&mut slice[offset..(offset + adjusted_len)])
+  }
+}
+unsafe impl<'a> Stack for SliceStack<'a> {
+  #[inline(always)]
+  fn base(&self) -> *mut u8 {
+    // The slice cannot wrap around the address space, so the conversion from usize
+    // to isize will not wrap either.
+    let len = self.0.len() as isize;
+    unsafe { self.limit().offset(len) }
+  }
+  #[inline(always)]
+  fn limit(&self) -> *mut u8 {
+    self.0.as_ptr() as *mut u8
+  }
+}

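`SliceStack::new` rounds the slice's start address up and its usable length down, so both `limit` and `base` land on `STACK_ALIGNMENT` boundaries. A worked sketch with concrete numbers, again assuming a 16-byte alignment:

fn main() {
    let alignment = 16usize; // stand-in for ::STACK_ALIGNMENT
    let ptr = 0x1003usize;   // hypothetical unaligned slice start
    let len = 1024usize;     // hypothetical slice length

    // Round the start address up to the next alignment boundary.
    let adjusted_ptr = (ptr + alignment - 1) & !(alignment - 1);
    assert_eq!(adjusted_ptr, 0x1010);

    // Skipping those leading bytes shrinks the usable length, which is
    // then rounded down to a whole number of alignment units.
    let offset = adjusted_ptr - ptr; // 13 bytes skipped at the front
    let adjusted_len = (len - offset) & !(alignment - 1);
    assert_eq!(adjusted_len, 1008);  // 1011 rounded down to 63 * 16

    // The panic fires only when `offset > len`, i.e. even the rounded-up
    // start lies past the end of the slice.
}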
tests/generator.rs

@@ -69,7 +69,7 @@ fn panic_safety() {
 #[test]
 fn with_slice_stack() {
   let mut memory = [0; 1024];
-  let stack = SliceStack(&mut memory);
+  let stack = SliceStack::new(&mut memory);
   let mut add_one = unsafe { Generator::unsafe_new(stack, add_one_fn) };
   assert_eq!(add_one.resume(1), Some(2));
   assert_eq!(add_one.resume(2), Some(3));

tests/stack.rs

@@ -4,26 +4,84 @@
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
+#![feature(alloc, heap_api)]
+extern crate alloc;
 extern crate fringe;
-use fringe::{Stack, SliceStack, OwnedStack, OsStack};
+use alloc::heap;
+use alloc::boxed::Box;
+use std::slice;
+use fringe::{STACK_ALIGNMENT, Stack, SliceStack, OwnedStack, OsStack};
+#[test]
+fn slice_aligned() {
+  unsafe {
+    let ptr = heap::allocate(16384, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, 16384));
+    let stack = SliceStack::new(&mut slice[4096..8192]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+#[test]
+fn slice_unaligned() {
+  unsafe {
+    let ptr = heap::allocate(16384, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, 16384));
+    let stack = SliceStack::new(&mut slice[4097..8193]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+#[test]
+fn slice_too_small() {
+  unsafe {
+    let ptr = heap::allocate(STACK_ALIGNMENT, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, STACK_ALIGNMENT));
+    let stack = SliceStack::new(&mut slice[0..1]);
+    assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+    assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  }
+}
+#[test]
+#[should_panic(expected = "SliceStack too small")]
+fn slice_too_small_unaligned() {
+  unsafe {
+    let ptr = heap::allocate(STACK_ALIGNMENT, STACK_ALIGNMENT);
+    let mut slice = Box::from_raw(slice::from_raw_parts_mut(ptr, STACK_ALIGNMENT));
+    SliceStack::new(&mut slice[1..2]);
+  }
+}
 #[test]
 fn slice_stack() {
   let mut memory = [0; 1024];
-  let stack = SliceStack(&mut memory);
-  assert_eq!(stack.base() as isize - stack.limit() as isize, 1024);
+  let stack = SliceStack::new(&mut memory);
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  // Size may be a bit smaller due to alignment
+  assert!(stack.base() as usize - stack.limit() as usize > 1024 - STACK_ALIGNMENT * 2);
 }
 #[test]
 fn owned_stack() {
   let stack = OwnedStack::new(1024);
-  assert_eq!(stack.base() as isize - stack.limit() as isize, 1024);
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.base() as usize - stack.limit() as usize, 1024);
 }
 #[test]
 fn default_os_stack() {
   let stack = OsStack::new(0).unwrap();
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
   // Make sure the topmost page of the stack, at least, is accessible.
   unsafe { *(stack.base().offset(-1)) = 0; }
 }
@@ -31,6 +89,9 @@ fn default_os_stack() {
 #[test]
 fn one_page_os_stack() {
   let stack = OsStack::new(4096).unwrap();
+  assert_eq!(stack.base() as usize & (STACK_ALIGNMENT - 1), 0);
+  assert_eq!(stack.limit() as usize & (STACK_ALIGNMENT - 1), 0);
   // Make sure the topmost page of the stack, at least, is accessible.
   unsafe { *(stack.base().offset(-1)) = 0; }
 }