// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! # liballoc crate for Rust SGX SDK
//!
//! This crate is equivalent to the `liballoc_system` crate in Rust.
//! It connects Rust memory allocation to Intel SGX's sgx_tstd library.
//! It is essential because we depend on Intel SGX's SDK.
//! 2018-06-22: added the liballoc components here.
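//!
//! A minimal usage sketch (illustrative only: the crate name `sgx_alloc` is an
//! assumption here, and the example only links inside an SGX enclave):
//!
//! ```rust,ignore
//! use sgx_alloc::System;
//!
//! // Route every Rust heap allocation in the enclave through tlibc.
//! #[global_allocator]
//! static ALLOC: System = System;
//! ```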

#![no_std]
// Unstable `core` features used below; a nightly toolchain is assumed.
#![feature(allocator_api)]
#![feature(alloc_layout_extra)]
#![feature(core_intrinsics)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(slice_ptr_get)]
use core::alloc::{AllocError, Allocator, GlobalAlloc, Layout};
use core::intrinsics;
use core::ptr::{self, NonNull};
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
#[cfg(target_arch = "x86")]
const MIN_ALIGN: usize = 8;
// The alignment of sgx tlibc is 16
// https://github.com/intel/linux-sgx/blob/master/sdk/tlibc/stdlib/malloc.c#L541
#[cfg(target_arch = "x86_64")]
const MIN_ALIGN: usize = 16;
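
/// The SGX system allocator, backed by the enclave tlibc's `malloc` family.
///
/// A sketch of the dispatch rule used by the `GlobalAlloc` impl below
/// (the layout values are illustrative):
///
/// ```rust,ignore
/// use core::alloc::Layout;
///
/// let small = Layout::from_size_align(64, 8).unwrap();  // align <= MIN_ALIGN: plain `malloc`
/// let big = Layout::from_size_align(64, 32).unwrap();   // align > MIN_ALIGN: `memalign` path
/// ```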
pub struct System;
impl System {
#[inline]
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
            // SAFETY: `layout` is non-zero in size.
size => unsafe {
let raw_ptr = if zeroed {
GlobalAlloc::alloc_zeroed(self, layout)
} else {
GlobalAlloc::alloc(self, layout)
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, size))
},
}
}
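
    // Shared implementation of `grow` and `grow_zeroed`: a zero-sized old
    // block degenerates to a fresh allocation; when the alignments match,
    // `realloc` is used (zeroing the grown tail if requested); otherwise the
    // block is reallocated by hand: allocate, copy, free.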
// SAFETY: Same as `Allocator::grow`
#[inline]
unsafe fn grow_impl(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => self.alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
// as required by safety conditions and the `old_size == 0` case was handled in the
// previous match arm. Other conditions must be upheld by the caller
old_size if old_layout.align() == new_layout.align() => {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
intrinsics::assume(new_size >= old_layout.size());
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
}
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => {
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                Allocator::deallocate(self, ptr, old_layout);
Ok(new_ptr)
}
}
}
}
// The `Allocator` impl checks the layout size to be non-zero and forwards to
// the `GlobalAlloc` impl, which lives in the `platform` module below.
unsafe impl Allocator for System {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, false)
}
#[inline]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, true)
}
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
GlobalAlloc::dealloc(self, ptr.as_ptr(), layout)
}
}
#[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
self.grow_impl(ptr, old_layout, new_layout, false)
}
#[inline]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
self.grow_impl(ptr, old_layout, new_layout, true)
}
#[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => {
                Allocator::deallocate(self, ptr, old_layout);
Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
}
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
intrinsics::assume(new_size <= old_layout.size());
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
}
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => {
                let new_ptr = Allocator::allocate(self, new_layout)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                Allocator::deallocate(self, ptr, old_layout);
Ok(new_ptr)
}
}
}
}
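
// A hedged sketch of driving the `Allocator` impl above directly (illustrative
// only; it needs the unstable `allocator_api` feature and only links inside an
// enclave where the tlibc symbols from `mod libc` resolve):
//
//     let layout = Layout::from_size_align(256, 16).unwrap();
//     let block = Allocator::allocate(&System, layout)?; // NonNull<[u8]>
//     unsafe { Allocator::deallocate(&System, block.cast(), layout) };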
mod realloc_fallback {
use core::alloc::{GlobalAlloc, Layout};
use core::cmp;
use core::ptr;
impl super::System {
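        /// Portable `realloc` fallback used when the aligned fast path in
        /// `GlobalAlloc::realloc` does not apply: allocate a new block, copy
        /// `min(old, new)` bytes, then free the old block. On allocation
        /// failure the old block is left untouched and null is returned.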
pub(crate) unsafe fn realloc_fallback(
&self,
ptr: *mut u8,
old_layout: Layout,
new_size: usize,
) -> *mut u8 {
// Docs for GlobalAlloc::realloc require this to be valid:
let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
let new_ptr = GlobalAlloc::alloc(self, new_layout);
if !new_ptr.is_null() {
let size = cmp::min(old_layout.size(), new_size);
ptr::copy_nonoverlapping(ptr, new_ptr, size);
GlobalAlloc::dealloc(self, ptr, old_layout);
}
new_ptr
}
}
}
mod platform {
use super::*;
use core::alloc::{GlobalAlloc, Layout};
use core::ffi::c_void;
use core::ptr;
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
libc::malloc(layout.size()) as *mut u8
} else {
aligned_malloc(&layout)
}
}
#[inline]
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
libc::calloc(layout.size(), 1) as *mut u8
} else {
let ptr = self.alloc(layout);
if !ptr.is_null() {
ptr::write_bytes(ptr, 0, layout.size());
}
ptr
}
}
#[inline]
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
libc::free(ptr as *mut c_void)
}
#[inline]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
libc::realloc(ptr as *mut c_void, new_size) as *mut u8
} else {
self.realloc_fallback(ptr, layout, new_size)
}
}
}
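
    /// Over-aligned allocation via tlibc's `memalign`; `GlobalAlloc::alloc`
    /// above routes a request here whenever `malloc`'s `MIN_ALIGN` guarantee
    /// would not satisfy the layout's alignment.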
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
libc::memalign(layout.align(), layout.size()) as *mut u8
}
}
mod libc {
use core::ffi::c_void;
    #[allow(non_camel_case_types)]
    type size_t = usize;
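
    // These symbols bind to Intel SGX tlibc's allocator inside the enclave
    // (sdk/tlibc/stdlib in the linux-sgx repository); no host libc is linked.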
extern "C" {
pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void;
pub fn malloc(size: size_t) -> *mut c_void;
pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void;
pub fn free(p: *mut c_void);
pub fn memalign(align: size_t, size: size_t) -> *mut c_void;
}
}