compiler: support rustc 2021-03-30
diff --git a/samplecode/unit-test/enclave/src/lib.rs b/samplecode/unit-test/enclave/src/lib.rs
index b9ea49b..e010cd5 100644
--- a/samplecode/unit-test/enclave/src/lib.rs
+++ b/samplecode/unit-test/enclave/src/lib.rs
@@ -348,4 +348,3 @@
test_exception_handler,
)
}
-
diff --git a/samplecode/unit-test/enclave/src/test_rand.rs b/samplecode/unit-test/enclave/src/test_rand.rs
index c748642..6d2d8ca 100644
--- a/samplecode/unit-test/enclave/src/test_rand.rs
+++ b/samplecode/unit-test/enclave/src/test_rand.rs
@@ -15,7 +15,6 @@
// specific language governing permissions and limitations
// under the License..
-use std::panic;
use sgx_rand::*;
// pub use os::SgxRng
diff --git a/samplecode/unit-test/enclave/src/test_rts.rs b/samplecode/unit-test/enclave/src/test_rts.rs
index 25d6047..668e50f 100644
--- a/samplecode/unit-test/enclave/src/test_rts.rs
+++ b/samplecode/unit-test/enclave/src/test_rts.rs
@@ -94,13 +94,14 @@
}
pub fn test_data_is_within_enclave() {
+ #[allow(dead_code)]
#[derive(Clone, Copy)]
struct SampleDs{
x: i32,
y: i32,
z: [i32; 100],
- };
- unsafe impl marker::ContiguousMemory for SampleDs{};
+ }
+ unsafe impl marker::ContiguousMemory for SampleDs {}
let mut sample_object : SampleDs = SampleDs{ x: 0, y: 0, z: [0; 100]};
sample_object.x = 100;
sample_object.y = 100;
@@ -138,13 +139,14 @@
}
pub fn test_data_is_outside_enclave() {
+ #[allow(dead_code)]
#[derive(Clone, Copy)]
struct SampleDs{
x: i32,
y: i32,
z: [i32; 100],
- };
- unsafe impl marker::ContiguousMemory for SampleDs{};
+ }
+ unsafe impl marker::ContiguousMemory for SampleDs {}
let mut sample_object : SampleDs = SampleDs{ x: 0, y: 0, z: [0; 100]};
sample_object.x = 100;
sample_object.y = 100;
diff --git a/samplecode/unit-test/enclave/src/test_time.rs b/samplecode/unit-test/enclave/src/test_time.rs
index d17e6ea..a9ec118 100644
--- a/samplecode/unit-test/enclave/src/test_time.rs
+++ b/samplecode/unit-test/enclave/src/test_time.rs
@@ -1,5 +1,4 @@
use std::time::*;
-use std::panic;
use std::untrusted::time::{InstantEx, SystemTimeEx};
pub fn test_std_time() {
diff --git a/sgx_alloc/Cargo.toml b/sgx_alloc/Cargo.toml
index 99bf1de..af14267 100644
--- a/sgx_alloc/Cargo.toml
+++ b/sgx_alloc/Cargo.toml
@@ -2,6 +2,7 @@
name = "sgx_alloc"
version = "1.1.3"
authors = ["The Teaclave Authors"]
+build = "build.rs"
repository = "https://github.com/apache/teaclave-sgx-sdk"
license-file = "LICENSE"
documentation = "https://dingelish.github.io/"
diff --git a/sgx_alloc/build.rs b/sgx_alloc/build.rs
new file mode 100644
index 0000000..47ea68c
--- /dev/null
+++ b/sgx_alloc/build.rs
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use std::env;
+use std::process::Command;
+
+fn main() {
+ // nightly-2020-12-05 (rustc 2020-12-04)
+ // Rename `AllocRef` to `Allocator` and `(de)alloc` to `(de)allocate`
+ // https://github.com/rust-lang/rust/commit/9274b37d99f608e5fde569788ee79bd72fc3cf13
+ if let Some(true) = is_min_date("2020-12-04") {
+ println!("cargo:rustc-cfg=enable_allocator_traits");
+ }
+}
+
+// code below copied from crate version_check
+// we want to remove the build dependencies to make the dependency tree
+// as clean as possible. the following codes credit to SergioBenitez
+#[derive(Debug, PartialEq, Eq, Copy, Clone, PartialOrd, Ord)]
+struct Date(u32);
+
+impl Date {
+ fn read() -> Option<Date> {
+ get_version_and_date()
+ .and_then(|(_, date)| date)
+ .and_then(|date| Date::parse(&date))
+ }
+
+ fn parse(date: &str) -> Option<Date> {
+ let ymd: Vec<u32> = date.split("-")
+ .filter_map(|s| s.parse::<u32>().ok())
+ .collect();
+
+ if ymd.len() != 3 {
+ return None
+ }
+
+ let (y, m, d) = (ymd[0], ymd[1], ymd[2]);
+ Some(Date((y << 9) | ((m & 0xF) << 5) | (d & 0x1F)))
+ }
+}
+
+fn get_version_and_date() -> Option<(Option<String>, Option<String>)> {
+ env::var("RUSTC").ok()
+ .and_then(|rustc| Command::new(rustc).arg("--version").output().ok())
+ .or_else(|| Command::new("rustc").arg("--version").output().ok())
+ .and_then(|output| String::from_utf8(output.stdout).ok())
+ .map(|s| version_and_date_from_rustc_version(&s))
+}
+
+fn version_and_date_from_rustc_version(s: &str) -> (Option<String>, Option<String>) {
+ let last_line = s.lines().last().unwrap_or(s);
+ let mut components = last_line.trim().split(" ");
+ let version = components.nth(1);
+ let date = components.filter(|c| c.ends_with(')')).next()
+ .map(|s| s.trim_end().trim_end_matches(")").trim_start().trim_start_matches('('));
+ (version.map(|s| s.to_string()), date.map(|s| s.to_string()))
+}
+
+fn is_min_date(min_date: &str) -> Option<bool> {
+ match (Date::read(), Date::parse(min_date)) {
+ (Some(rustc_date), Some(min_date)) => Some(rustc_date >= min_date),
+ _ => None
+ }
+}
diff --git a/sgx_alloc/src/alignalloc.rs b/sgx_alloc/src/alignalloc.rs
index 6d5460a..f9afa9e 100644
--- a/sgx_alloc/src/alignalloc.rs
+++ b/sgx_alloc/src/alignalloc.rs
@@ -135,7 +135,7 @@
if !check_layout(&layout) {
return ptr::null_mut();
}
- let align_layout = match if align_req.len() == 0 {
+ let align_layout = match if align_req.is_empty() {
let req: [AlignReq; 1] = [AlignReq {
offset: 0,
len: layout.size(),
@@ -169,7 +169,7 @@
if !check_layout(&layout) {
return ptr::null_mut();
}
- let align_layout = match if align_req.len() == 0 {
+ let align_layout = match if align_req.is_empty() {
let req: [AlignReq; 1] = [AlignReq {
offset: 0,
len: layout.size(),
@@ -206,18 +206,17 @@
let pad = align_layout.size() - align_layout.align() - layout.size();
let raw = libc::malloc(align_layout.size() + mem::size_of::<*mut u8>()) as *mut u8;
- let aligned_ptr = if raw.is_null() {
+ if raw.is_null() {
raw
} else {
- if zeroed == true {
+ if zeroed {
ptr::write_bytes(raw, 0, align_layout.size());
}
let ptr = make_aligned_ptr(raw, align_layout.align(), pad);
let p = ptr as *mut *mut u8;
p.sub(1).write(raw);
ptr
- };
- aligned_ptr
+ }
}
#[inline]
@@ -260,22 +259,17 @@
#[inline]
fn check_overflow(buf: usize, len: usize) -> bool {
- (buf + len < len) || (buf + len < buf)
+ buf.checked_add(len).is_none()
}
fn check_layout(layout: &Layout) -> bool {
- if layout.size() == 0
+ !(layout.size() == 0
|| !layout.align().is_power_of_two()
- || layout.size() > usize::MAX - (layout.align() - 1)
- {
- false
- } else {
- true
- }
+ || layout.size() > usize::MAX - (layout.align() - 1))
}
fn check_align_req(size: usize, align_req: &[AlignReq]) -> bool {
- if align_req.len() == 0 {
+ if align_req.is_empty() {
return false;
}
let len: usize = (size + 7) / 8;
@@ -335,12 +329,10 @@
}
fn count_lzb(bmp: i64) -> i32 {
- if bmp == 0 {
- -1
- } else if bmp < 0 {
- 0
- } else {
- count_lzb(bmp << 1) + 1
+ match bmp {
+ 0 => -1,
+ x if x < 0 => 0,
+ _ => count_lzb(bmp << 1) + 1,
}
}
diff --git a/sgx_alloc/src/alignbox.rs b/sgx_alloc/src/alignbox.rs
index 566d85a..5a888b2 100644
--- a/sgx_alloc/src/alignbox.rs
+++ b/sgx_alloc/src/alignbox.rs
@@ -166,7 +166,7 @@
impl<T> AlignBox<T> {
fn new_with_req_in(align: usize, align_req: &[AlignReq]) -> Option<AlignBox<T>> {
- if align_req.len() == 0 {
+ if align_req.is_empty() {
AlignBox::new_in()
} else {
AlignBox::allocate_in(true, align, align_req)
@@ -217,7 +217,7 @@
Some(AlignBox {
ptr: Unique::new(ptr.cast::<T>().as_ptr()).unwrap(),
- align_layout: align_layout,
+ align_layout,
origin_layout: layout,
})
}
@@ -242,9 +242,8 @@
{
unsafe {
let mut t = Self::new_in();
- match t {
- Some(ref mut b) => initialize(b.ptr.as_mut()),
- None => (),
+ if let Some(ref mut b) = t {
+ initialize(b.ptr.as_mut())
}
t
}
@@ -255,9 +254,8 @@
{
unsafe {
let mut t = Self::new_with_align(align);
- match t {
- Some(ref mut b) => initialize(b.ptr.as_mut()),
- None => (),
+ if let Some(ref mut b) = t {
+ initialize(b.ptr.as_mut())
}
t
}
@@ -272,9 +270,8 @@
{
unsafe {
let mut t = Self::new_with_req(align, data);
- match t {
- Some(ref mut b) => initialize(b.ptr.as_mut()),
- None => (),
+ if let Some(ref mut b) = t {
+ initialize(b.ptr.as_mut())
}
t
}
diff --git a/sgx_alloc/src/rsrvmem.rs b/sgx_alloc/src/rsrvmem.rs
index 2cf832c..793775c 100644
--- a/sgx_alloc/src/rsrvmem.rs
+++ b/sgx_alloc/src/rsrvmem.rs
@@ -30,7 +30,6 @@
}
impl RsrvMemAlloc {
-
/// Allocate a range of EPC memory from the reserved memory area
///
/// # Parameters
@@ -53,9 +52,9 @@
/// # Parameters
///
/// **addr**
- ///
+ ///
/// The desired starting address to allocate the reserved memory. Should be page aligned.
- ///
+ ///
/// **count**
///
/// Count of pages to allocate region
@@ -65,7 +64,11 @@
/// Starting address of the new allocated memory area on success;
///
#[inline]
- pub unsafe fn alloc_with_addr(&self, addr: NonNull<u8>, count: u32) -> Result<NonNull<u8>, RsrvMemAllocErr> {
+ pub unsafe fn alloc_with_addr(
+ &self,
+ addr: NonNull<u8>,
+ count: u32,
+ ) -> Result<NonNull<u8>, RsrvMemAllocErr> {
NonNull::new(platform::alloc_with_addr(addr.as_ptr(), count)).ok_or(RsrvMemAllocErr)
}
diff --git a/sgx_alloc/src/system.rs b/sgx_alloc/src/system.rs
index a29a723..defdb1a 100644
--- a/sgx_alloc/src/system.rs
+++ b/sgx_alloc/src/system.rs
@@ -22,9 +22,11 @@
//! It is essential, because we depends on Intel SGX's SDK.
//! 2018-06-22 Add liballoc components here
-use core::alloc::{
- AllocError, AllocRef, GlobalAlloc, Layout,
-};
+use core::alloc::{AllocError, GlobalAlloc, Layout};
+#[cfg(enable_allocator_traits)]
+use core::alloc::Allocator;
+#[cfg(not(enable_allocator_traits))]
+use core::alloc::AllocRef as Allocator;
use core::intrinsics;
use core::ptr::{self, NonNull};
@@ -59,7 +61,7 @@
}
}
- // Safety: Same as `AllocRef::grow`
+ // Safety: Same as `Allocator::grow`
#[inline]
unsafe fn grow_impl(
&self,
@@ -100,24 +102,51 @@
old_size => {
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
- AllocRef::dealloc(&self, ptr, old_layout);
+ if old_layout.size() != 0 {
+ GlobalAlloc::dealloc(self, ptr.as_ptr(), old_layout)
+ }
Ok(new_ptr)
},
}
}
}
-unsafe impl AllocRef for System {
+unsafe impl Allocator for System {
+ #[cfg(enable_allocator_traits)]
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[cfg(not(enable_allocator_traits))]
#[inline]
fn alloc(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, false)
}
+ #[cfg(enable_allocator_traits)]
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[cfg(not(enable_allocator_traits))]
#[inline]
fn alloc_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, true)
}
+ #[cfg(enable_allocator_traits)]
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ GlobalAlloc::dealloc(self, ptr.as_ptr(), layout)
+ }
+ }
+
+ #[cfg(not(enable_allocator_traits))]
#[inline]
unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
@@ -157,7 +186,9 @@
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => {
- AllocRef::dealloc(&self, ptr, old_layout);
+ if old_layout.size() != 0 {
+ GlobalAlloc::dealloc(self, ptr.as_ptr(), old_layout)
+ }
Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
@@ -176,9 +207,11 @@
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => {
- let new_ptr = AllocRef::alloc(&self, new_layout)?;
+ let new_ptr = self.alloc_impl(new_layout, false)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
- AllocRef::dealloc(&self, ptr, old_layout);
+ if old_layout.size() != 0 {
+ GlobalAlloc::dealloc(self, ptr.as_ptr(), old_layout)
+ }
Ok(new_ptr)
},
}
diff --git a/sgx_serialize_derive/src/lib.rs b/sgx_serialize_derive/src/lib.rs
index a76d1f1..29813e6 100644
--- a/sgx_serialize_derive/src/lib.rs
+++ b/sgx_serialize_derive/src/lib.rs
@@ -56,7 +56,7 @@
let input = syn::parse_derive_input(&input.to_string()).unwrap();
match encode::expand_derive_serialize(&input) {
Ok(expanded) => expanded.parse().unwrap(),
- Err(msg) => panic!(msg),
+ Err(msg) => panic!("{}", msg),
}
}
@@ -72,6 +72,6 @@
let input = syn::parse_derive_input(&input.to_string()).unwrap();
match decode::expand_derive_deserialize(&input) {
Ok(expanded) => expanded.parse().unwrap(),
- Err(msg) => panic!(msg),
+ Err(msg) => panic!("{}", msg),
}
}
diff --git a/sgx_signal/src/exception.rs b/sgx_signal/src/exception.rs
index 9b3f512..3cb482e 100644
--- a/sgx_signal/src/exception.rs
+++ b/sgx_signal/src/exception.rs
@@ -143,7 +143,7 @@
#[no_mangle]
#[inline(never)]
-unsafe extern "C" fn exception_panic(vector: sgx_exception_vector_t, rip: usize) {
+unsafe fn exception_panic(vector: sgx_exception_vector_t, rip: usize) {
let exception = match vector {
sgx_exception_vector_t::SGX_EXCEPTION_VECTOR_DE => "#DE",
sgx_exception_vector_t::SGX_EXCEPTION_VECTOR_DB => "#DB",
diff --git a/sgx_tstd/build.rs b/sgx_tstd/build.rs
index e0fd1d9..ddc7ab9 100644
--- a/sgx_tstd/build.rs
+++ b/sgx_tstd/build.rs
@@ -45,6 +45,12 @@
if let Some(true) = is_min_date("2020-11-25") {
println!("cargo:rustc-cfg=enable_auto_traits");
}
+
+ // nightly-2021-02-08 (rustc 2021-02-07)
+ // https://github.com/rust-lang/rust/commit/dbdbd30bf2cb0d48c8bbce83c2458592664dbb18
+ if let Some(true) = is_min_date("2021-02-07") {
+ println!("cargo:rustc-cfg=derive_macros");
+ }
}
// code below copied from crate version_check
diff --git a/sgx_tstd/hashbrown/src/raw/sse2.rs b/sgx_tstd/hashbrown/src/raw/sse2.rs
index a27bc09..dbe61d8 100644
--- a/sgx_tstd/hashbrown/src/raw/sse2.rs
+++ b/sgx_tstd/hashbrown/src/raw/sse2.rs
@@ -33,7 +33,7 @@
struct AlignedBytes {
_align: [Group; 0],
bytes: [u8; Group::WIDTH],
- };
+ }
const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
_align: [],
bytes: [EMPTY; Group::WIDTH],
diff --git a/sgx_tstd/src/future.rs b/sgx_tstd/src/future.rs
index bbbc4f2..e76e41c 100644
--- a/sgx_tstd/src/future.rs
+++ b/sgx_tstd/src/future.rs
@@ -17,107 +17,14 @@
//! Asynchronous values.
-#[cfg(bootstrap)]
-use core::{
- cell::Cell,
- marker::Unpin,
- ops::{Drop, Generator, GeneratorState},
- pin::Pin,
- ptr::NonNull,
- task::{Context, Poll},
-};
+#[doc(inline)]
+pub use core::future::Future;
#[doc(inline)]
-pub use core::future::*;
+pub use core::future::{from_generator, get_context, ResumeTy};
-/// Wrap a generator in a future.
-///
-/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
-/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
-// This is `const` to avoid extra errors after we recover from `const async fn`
-#[cfg(bootstrap)]
-#[doc(hidden)]
-pub const fn from_generator<T: Generator<Yield = ()>>(x: T) -> impl Future<Output = T::Return> {
- GenFuture(x)
-}
+#[doc(inline)]
+pub use core::future::{pending, ready, Pending, Ready};
-/// A wrapper around generators used to implement `Future` for `async`/`await` code.
-#[cfg(bootstrap)]
-#[doc(hidden)]
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
-struct GenFuture<T: Generator<Yield = ()>>(T);
-
-// We rely on the fact that async/await futures are immovable in order to create
-// self-referential borrows in the underlying generator.
-#[cfg(bootstrap)]
-impl<T: Generator<Yield = ()>> !Unpin for GenFuture<T> {}
-
-#[cfg(bootstrap)]
-#[doc(hidden)]
-impl<T: Generator<Yield = ()>> Future for GenFuture<T> {
- type Output = T::Return;
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- // Safe because we're !Unpin + !Drop mapping to a ?Unpin value
- let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) };
- let _guard = unsafe { set_task_context(cx) };
- match gen.resume(()) {
- GeneratorState::Yielded(()) => Poll::Pending,
- GeneratorState::Complete(x) => Poll::Ready(x),
- }
- }
-}
-
-#[cfg(bootstrap)]
-thread_local! {
- static TLS_CX: Cell<Option<NonNull<Context<'static>>>> = Cell::new(None);
-}
-
-#[cfg(bootstrap)]
-struct SetOnDrop(Option<NonNull<Context<'static>>>);
-
-#[cfg(bootstrap)]
-impl Drop for SetOnDrop {
- fn drop(&mut self) {
- TLS_CX.with(|tls_cx| {
- tls_cx.set(self.0.take());
- });
- }
-}
-
-// Safety: the returned guard must drop before `cx` is dropped and before
-// any previous guard is dropped.
-#[cfg(bootstrap)]
-unsafe fn set_task_context(cx: &mut Context<'_>) -> SetOnDrop {
- // transmute the context's lifetime to 'static so we can store it.
- let cx = core::mem::transmute::<&mut Context<'_>, &mut Context<'static>>(cx);
- let old_cx = TLS_CX.with(|tls_cx| tls_cx.replace(Some(NonNull::from(cx))));
- SetOnDrop(old_cx)
-}
-
-#[cfg(bootstrap)]
-#[doc(hidden)]
-/// Polls a future in the current thread-local task waker.
-pub fn poll_with_tls_context<F>(f: Pin<&mut F>) -> Poll<F::Output>
-where
- F: Future,
-{
- let cx_ptr = TLS_CX.with(|tls_cx| {
- // Clear the entry so that nested `get_task_waker` calls
- // will fail or set their own value.
- tls_cx.replace(None)
- });
- let _reset = SetOnDrop(cx_ptr);
-
- let mut cx_ptr = cx_ptr.expect(
- "TLS Context not set. This is a rustc bug. \
- Please file an issue on https://github.com/rust-lang/rust.",
- );
-
- // Safety: we've ensured exclusive access to the context by
- // removing the pointer from TLS, only to be replaced once
- // we're done with it.
- //
- // The pointer that was inserted came from an `&mut Context<'_>`,
- // so it is safe to treat as mutable.
- unsafe { F::poll(f, cx_ptr.as_mut()) }
-}
+#[doc(inline)]
+pub use core::future::IntoFuture;
diff --git a/sgx_tstd/src/lazy.rs b/sgx_tstd/src/lazy.rs
new file mode 100644
index 0000000..36171de
--- /dev/null
+++ b/sgx_tstd/src/lazy.rs
@@ -0,0 +1,562 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+//! Lazy values and one-time initialization of static data.
+
+use core::cell::{Cell, UnsafeCell};
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::ops::{Deref, Drop};
+use core::pin::Pin;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::sync::Once;
+
+#[doc(inline)]
+pub use core::lazy::*;
+
+/// A synchronization primitive which can be written to only once.
+///
+/// This type is a thread-safe `OnceCell`.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::lazy::SyncOnceCell;
+///
+/// static CELL: SyncOnceCell<String> = SyncOnceCell::new();
+/// assert!(CELL.get().is_none());
+///
+/// std::thread::spawn(|| {
+/// let value: &String = CELL.get_or_init(|| {
+/// "Hello, World!".to_string()
+/// });
+/// assert_eq!(value, "Hello, World!");
+/// }).join().unwrap();
+///
+/// let value: Option<&String> = CELL.get();
+/// assert!(value.is_some());
+/// assert_eq!(value.unwrap().as_str(), "Hello, World!");
+/// ```
+pub struct SyncOnceCell<T> {
+ once: Once,
+ // Whether or not the value is initialized is tracked by `state_and_queue`.
+ value: UnsafeCell<MaybeUninit<T>>,
+ /// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
+ ///
+ /// ```compile_fail,E0597
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// struct A<'a>(&'a str);
+ ///
+ /// impl<'a> Drop for A<'a> {
+ /// fn drop(&mut self) {}
+ /// }
+ ///
+ /// let cell = SyncOnceCell::new();
+ /// {
+ /// let s = String::new();
+ /// let _ = cell.set(A(&s));
+ /// }
+ /// ```
+ _marker: PhantomData<T>,
+}
+
+// Why do we need `T: Send`?
+// Thread A creates a `SyncOnceCell` and shares it with
+// scoped thread B, which fills the cell, which is
+// then destroyed by A. That is, destructor observes
+// a sent value.
+unsafe impl<T: Sync + Send> Sync for SyncOnceCell<T> {}
+unsafe impl<T: Send> Send for SyncOnceCell<T> {}
+
+impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for SyncOnceCell<T> {}
+impl<T: UnwindSafe> UnwindSafe for SyncOnceCell<T> {}
+
+impl<T> Default for SyncOnceCell<T> {
+ fn default() -> SyncOnceCell<T> {
+ SyncOnceCell::new()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for SyncOnceCell<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.get() {
+ Some(v) => f.debug_tuple("Once").field(v).finish(),
+ None => f.write_str("Once(Uninit)"),
+ }
+ }
+}
+
+impl<T: Clone> Clone for SyncOnceCell<T> {
+ fn clone(&self) -> SyncOnceCell<T> {
+ let cell = Self::new();
+ if let Some(value) = self.get() {
+ match cell.set(value.clone()) {
+ Ok(()) => (),
+ Err(_) => unreachable!(),
+ }
+ }
+ cell
+ }
+}
+
+impl<T> From<T> for SyncOnceCell<T> {
+ fn from(value: T) -> Self {
+ let cell = Self::new();
+ match cell.set(value) {
+ Ok(()) => cell,
+ Err(_) => unreachable!(),
+ }
+ }
+}
+
+impl<T: PartialEq> PartialEq for SyncOnceCell<T> {
+ fn eq(&self, other: &SyncOnceCell<T>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+impl<T: Eq> Eq for SyncOnceCell<T> {}
+
+impl<T> SyncOnceCell<T> {
+ /// Creates a new empty cell.
+ pub const fn new() -> SyncOnceCell<T> {
+ SyncOnceCell {
+ once: Once::new(),
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Gets the reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty, or being initialized. This
+ /// method never blocks.
+ pub fn get(&self) -> Option<&T> {
+ if self.is_initialized() {
+ // Safe b/c checked is_initialized
+ Some(unsafe { self.get_unchecked() })
+ } else {
+ None
+ }
+ }
+
+ /// Gets the mutable reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty. This method never blocks.
+ pub fn get_mut(&mut self) -> Option<&mut T> {
+ if self.is_initialized() {
+ // Safe b/c checked is_initialized and we have a unique access
+ Some(unsafe { self.get_unchecked_mut() })
+ } else {
+ None
+ }
+ }
+
+ /// Sets the contents of this cell to `value`.
+ ///
+ /// Returns `Ok(())` if the cell's value was updated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// static CELL: SyncOnceCell<i32> = SyncOnceCell::new();
+ ///
+ /// fn main() {
+ /// assert!(CELL.get().is_none());
+ ///
+ /// std::thread::spawn(|| {
+ /// assert_eq!(CELL.set(92), Ok(()));
+ /// }).join().unwrap();
+ ///
+ /// assert_eq!(CELL.set(62), Err(62));
+ /// assert_eq!(CELL.get(), Some(&92));
+ /// }
+ /// ```
+ pub fn set(&self, value: T) -> Result<(), T> {
+ let mut value = Some(value);
+ self.get_or_init(|| value.take().unwrap());
+ match value {
+ None => Ok(()),
+ Some(value) => Err(value),
+ }
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if the cell
+ /// was empty.
+ ///
+ /// Many threads may call `get_or_init` concurrently with different
+ /// initializing functions, but it is guaranteed that only one function
+ /// will be executed.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and the cell
+ /// remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. The
+ /// exact outcome is unspecified. Current implementation deadlocks, but
+ /// this may be changed to a panic in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// let cell = SyncOnceCell::new();
+ /// let value = cell.get_or_init(|| 92);
+ /// assert_eq!(value, &92);
+ /// let value = cell.get_or_init(|| unreachable!());
+ /// assert_eq!(value, &92);
+ /// ```
+ pub fn get_or_init<F>(&self, f: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ match self.get_or_try_init(|| Ok::<T, !>(f())) {
+ Ok(val) => val,
+ Err(_) => unreachable!(),
+ }
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if
+ /// the cell was empty. If the cell was empty and `f` failed, an
+ /// error is returned.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and
+ /// the cell remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`.
+ /// The exact outcome is unspecified. Current implementation
+ /// deadlocks, but this may be changed to a panic in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// let cell = SyncOnceCell::new();
+ /// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
+ /// assert!(cell.get().is_none());
+ /// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
+ /// Ok(92)
+ /// });
+ /// assert_eq!(value, Ok(&92));
+ /// assert_eq!(cell.get(), Some(&92))
+ /// ```
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ // Fast path check
+ // NOTE: We need to perform an acquire on the state in this method
+ // in order to correctly synchronize `SyncLazy::force`. This is
+ // currently done by calling `self.get()`, which in turn calls
+ // `self.is_initialized()`, which in turn performs the acquire.
+ if let Some(value) = self.get() {
+ return Ok(value);
+ }
+ self.initialize(f)?;
+
+ debug_assert!(self.is_initialized());
+
+ // SAFETY: The inner value has been initialized
+ Ok(unsafe { self.get_unchecked() })
+ }
+
+ /// Internal-only API that gets the contents of the cell, initializing it
+ /// in two steps with `f` and `g` if the cell was empty.
+ ///
+ /// `f` is called to construct the value, which is then moved into the cell
+ /// and given as a (pinned) mutable reference to `g` to finish
+ /// initialization.
+ ///
+ /// This allows `g` to inspect and manipulate the value after it has been
+ /// moved into its final place in the cell, but before the cell is
+ /// considered initialized.
+ ///
+ /// # Panics
+ ///
+ /// If `f` or `g` panics, the panic is propagated to the caller, and the
+ /// cell remains uninitialized.
+ ///
+ /// With the current implementation, if `g` panics, the value from `f` will
+ /// not be dropped. This should probably be fixed if this is ever used for
+ /// a type where this matters.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. The exact
+ /// outcome is unspecified. Current implementation deadlocks, but this may
+ /// be changed to a panic in the future.
+ pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
+ where
+ F: FnOnce() -> T,
+ G: FnOnce(Pin<&mut T>),
+ {
+ if let Some(value) = self.get_ref().get() {
+ // SAFETY: The inner value was already initialized, and will not be
+ // moved anymore.
+ return unsafe { Pin::new_unchecked(value) };
+ }
+
+ let slot = &self.value;
+
+ // Ignore poisoning from other threads
+ // If another thread panics, then we'll be able to run our closure
+ self.once.call_once_force(|_| {
+ let value = f();
+ // SAFETY: We use the Once (self.once) to guarantee unique access
+ // to the UnsafeCell (slot).
+ let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
+ // SAFETY: The value has been written to its final place in
+ // self.value. We do not move it anymore, which we promise here
+ // with a Pin<&mut T>.
+ g(unsafe { Pin::new_unchecked(value) });
+ });
+
+ // SAFETY: The inner value has been initialized, and will not be moved
+ // anymore.
+ unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
+ }
+
+ /// Consumes the `SyncOnceCell`, returning the wrapped value. Returns
+ /// `None` if the cell was empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// let cell: SyncOnceCell<String> = SyncOnceCell::new();
+ /// assert_eq!(cell.into_inner(), None);
+ ///
+ /// let cell = SyncOnceCell::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
+ /// ```
+ pub fn into_inner(mut self) -> Option<T> {
+ self.take()
+ }
+
+ /// Takes the value out of this `SyncOnceCell`, moving it back to an uninitialized state.
+ ///
+ /// Has no effect and returns `None` if the `SyncOnceCell` hasn't been initialized.
+ ///
+ /// Safety is guaranteed by requiring a mutable reference.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncOnceCell;
+ ///
+ /// let mut cell: SyncOnceCell<String> = SyncOnceCell::new();
+ /// assert_eq!(cell.take(), None);
+ ///
+ /// let mut cell = SyncOnceCell::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.take(), Some("hello".to_string()));
+ /// assert_eq!(cell.get(), None);
+ /// ```
+ pub fn take(&mut self) -> Option<T> {
+ if self.is_initialized() {
+ self.once = Once::new();
+ // SAFETY: `self.value` is initialized and contains a valid `T`.
+ // `self.once` is reset, so `is_initialized()` will be false again
+ // which prevents the value from being read twice.
+ unsafe { Some((&mut *self.value.get()).assume_init_read()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn is_initialized(&self) -> bool {
+ self.once.is_completed()
+ }
+
+ #[cold]
+ fn initialize<F, E>(&self, f: F) -> Result<(), E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ let mut res: Result<(), E> = Ok(());
+ let slot = &self.value;
+
+ // Ignore poisoning from other threads
+ // If another thread panics, then we'll be able to run our closure
+ self.once.call_once_force(|p| {
+ match f() {
+ Ok(value) => {
+ unsafe { (&mut *slot.get()).write(value) };
+ }
+ Err(e) => {
+ res = Err(e);
+
+ // Treat the underlying `Once` as poisoned since we
+ // failed to initialize our value.
+ p.poison();
+ }
+ }
+ });
+ res
+ }
+
+ /// Safety: The value must be initialized
+ unsafe fn get_unchecked(&self) -> &T {
+ debug_assert!(self.is_initialized());
+ (&*self.value.get()).assume_init_ref()
+ }
+
+ /// Safety: The value must be initialized
+ unsafe fn get_unchecked_mut(&mut self) -> &mut T {
+ debug_assert!(self.is_initialized());
+ (&mut *self.value.get()).assume_init_mut()
+ }
+}
+
+unsafe impl<#[may_dangle] T> Drop for SyncOnceCell<T> {
+ fn drop(&mut self) {
+ if self.is_initialized() {
+ // Safety: The cell is initialized and being dropped, so it can't
+ // be accessed again. We also don't touch the `T` other than
+ // dropping it, which validates our usage of #[may_dangle].
+ unsafe { (&mut *self.value.get()).assume_init_drop() };
+ }
+ }
+}
+
+/// A value which is initialized on the first access.
+///
+/// This type is a thread-safe `Lazy`, and can be used in statics.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::collections::HashMap;
+///
+/// use std::lazy::SyncLazy;
+///
+/// static HASHMAP: SyncLazy<HashMap<i32, String>> = SyncLazy::new(|| {
+/// println!("initializing");
+/// let mut m = HashMap::new();
+/// m.insert(13, "Spica".to_string());
+/// m.insert(74, "Hoyten".to_string());
+/// m
+/// });
+///
+/// fn main() {
+/// println!("ready");
+/// std::thread::spawn(|| {
+/// println!("{:?}", HASHMAP.get(&13));
+/// }).join().unwrap();
+/// println!("{:?}", HASHMAP.get(&74));
+///
+/// // Prints:
+/// // ready
+/// // initializing
+/// // Some("Spica")
+/// // Some("Hoyten")
+/// }
+/// ```
+pub struct SyncLazy<T, F = fn() -> T> {
+ cell: SyncOnceCell<T>,
+ init: Cell<Option<F>>,
+}
+
+impl<T: fmt::Debug, F> fmt::Debug for SyncLazy<T, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
+ }
+}
+
+// We never create a `&F` from a `&SyncLazy<T, F>` so it is fine
+// to not impl `Sync` for `F`
+// we do create a `&mut Option<F>` in `force`, but this is
+// properly synchronized, so it only happens once
+// so it also does not contribute to this impl.
+unsafe impl<T, F: Send> Sync for SyncLazy<T, F> where SyncOnceCell<T>: Sync {}
+// auto-derived `Send` impl is OK.
+
+impl<T, F: UnwindSafe> RefUnwindSafe for SyncLazy<T, F> where SyncOnceCell<T>: RefUnwindSafe {}
+impl<T, F: UnwindSafe> UnwindSafe for SyncLazy<T, F> where SyncOnceCell<T>: UnwindSafe {}
+
+impl<T, F> SyncLazy<T, F> {
+ /// Creates a new lazy value with the given initializing
+ /// function.
+ pub const fn new(f: F) -> SyncLazy<T, F> {
+ SyncLazy { cell: SyncOnceCell::new(), init: Cell::new(Some(f)) }
+ }
+}
+
+impl<T, F: FnOnce() -> T> SyncLazy<T, F> {
+ /// Forces the evaluation of this lazy value and
+ /// returns a reference to the result. This is equivalent
+ /// to the `Deref` impl, but is explicit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::lazy::SyncLazy;
+ ///
+ /// let lazy = SyncLazy::new(|| 92);
+ ///
+ /// assert_eq!(SyncLazy::force(&lazy), &92);
+ /// assert_eq!(&*lazy, &92);
+ /// ```
+ pub fn force(this: &SyncLazy<T, F>) -> &T {
+ this.cell.get_or_init(|| match this.init.take() {
+ Some(f) => f(),
+ None => panic!("Lazy instance has previously been poisoned"),
+ })
+ }
+}
+
+impl<T, F: FnOnce() -> T> Deref for SyncLazy<T, F> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ SyncLazy::force(self)
+ }
+}
+
+impl<T: Default> Default for SyncLazy<T> {
+ /// Creates a new lazy value using `Default` as the initializing function.
+ fn default() -> SyncLazy<T> {
+ SyncLazy::new(T::default)
+ }
+}
diff --git a/sgx_tstd/src/lib.rs b/sgx_tstd/src/lib.rs
index 9769cea..44db0f9 100644
--- a/sgx_tstd/src/lib.rs
+++ b/sgx_tstd/src/lib.rs
@@ -40,6 +40,7 @@
#![allow(dead_code)]
#![allow(deprecated)]
#![allow(unused_assignments)]
+#![allow(stable_features)]
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
@@ -66,15 +67,19 @@
#![feature(fn_traits)]
#![feature(generator_trait)]
#![feature(format_args_nl)]
+#![feature(gen_future)]
#![feature(global_asm)]
#![feature(hashmap_internals)]
#![feature(int_error_internals)]
+#![feature(into_future)]
#![feature(lang_items)]
#![feature(llvm_asm)]
#![feature(log_syntax)]
+#![feature(maybe_uninit_extra)]
#![feature(maybe_uninit_ref)]
#![feature(never_type)]
#![feature(needs_panic_runtime)]
+#![feature(once_cell)]
#![feature(panic_unwind)]
#![feature(prelude_import)]
#![feature(ptr_internals)]
@@ -90,6 +95,7 @@
#![feature(unboxed_closures)]
#![feature(untagged_unions)]
#![feature(unwind_attributes)]
+#![feature(wake_trait)]
#![feature(libc)]
#![feature(panic_internals)]
#![feature(std_internals)]
@@ -213,10 +219,15 @@
pub mod enclave;
pub mod untrusted;
+pub mod lazy;
+
pub mod task {
//! Types and Traits for working with asynchronous tasks.
#[doc(inline)]
pub use core::task::*;
+
+ #[doc(inline)]
+ pub use alloc_crate::task::*;
}
pub mod future;
diff --git a/sgx_tstd/src/prelude/v1.rs b/sgx_tstd/src/prelude/v1.rs
index 44a68f1..c6b5d7b 100644
--- a/sgx_tstd/src/prelude/v1.rs
+++ b/sgx_tstd/src/prelude/v1.rs
@@ -57,6 +57,9 @@
PartialEq, PartialOrd, RustcDecodable, RustcEncodable,
};
+#[cfg(derive_macros)]
+#[doc(hidden)]
+pub use core::prelude::v1::derive;
#[doc(hidden)]
pub use core::prelude::v1::cfg_accessible;
diff --git a/sgx_tstd/src/sync/once.rs b/sgx_tstd/src/sync/once.rs
index d9158fa..21201de 100644
--- a/sgx_tstd/src/sync/once.rs
+++ b/sgx_tstd/src/sync/once.rs
@@ -102,6 +102,7 @@
#[derive(Debug)]
pub struct OnceState {
poisoned: bool,
+ set_state_on_drop_to: Cell<usize>,
}
/// Initialization value for static [`Once`] values.
@@ -215,7 +216,7 @@
}
let mut f = Some(f);
- self.call_inner(true, &mut |p| f.take().unwrap()(&OnceState { poisoned: p }));
+ self.call_inner(true, &mut |p| f.take().unwrap()(p));
}
/// Returns `true` if some `call_once` call has completed
@@ -252,7 +253,7 @@
// currently no way to take an `FnOnce` and call it via virtual dispatch
// without some allocation overhead.
#[cold]
- fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(bool)) {
+ fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
loop {
match state_and_queue {
@@ -263,12 +264,13 @@
}
POISONED | INCOMPLETE => {
// Try to register this thread as the one RUNNING.
- let old = self.state_and_queue.compare_and_swap(
+ let exchange_result = self.state_and_queue.compare_exchange(
state_and_queue,
RUNNING,
Ordering::Acquire,
+ Ordering::Acquire,
);
- if old != state_and_queue {
+ if let Err(old) = exchange_result {
state_and_queue = old;
continue;
}
@@ -280,8 +282,12 @@
};
// Run the initialization function, letting it know if we're
// poisoned or not.
- init(state_and_queue == POISONED);
- waiter_queue.set_state_on_drop_to = COMPLETE;
+ let init_state = OnceState {
+ poisoned: state_and_queue == POISONED,
+ set_state_on_drop_to: Cell::new(COMPLETE),
+ };
+ init(&init_state);
+ waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
break;
}
_ => {
@@ -316,8 +322,13 @@
// Try to slide in the node at the head of the linked list, making sure
// that another thread didn't just replace the head of the linked list.
- let old = state_and_queue.compare_and_swap(current_state, me | RUNNING, Ordering::Release);
- if old != current_state {
+ let exchange_result = state_and_queue.compare_exchange(
+ current_state,
+ me | RUNNING,
+ Ordering::Release,
+ Ordering::Relaxed,
+ );
+ if let Err(old) = exchange_result {
current_state = old;
continue;
}
@@ -363,7 +374,7 @@
let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
while !queue.is_null() {
let next = (*queue).next;
- let thread = (*queue).thread.replace(None).unwrap();
+ let thread = (*queue).thread.take().unwrap();
(*queue).signaled.store(true, Ordering::Release);
// ^- FIXME (maybe): This is another case of issue #55005
// `store()` has a potentially dangling ref to `signaled`.
@@ -384,4 +395,10 @@
pub fn poisoned(&self) -> bool {
self.poisoned
}
+
+ /// Poison the associated [`Once`] without explicitly panicking.
+ // NOTE: This is currently only exposed for the `lazy` module
+ pub(crate) fn poison(&self) {
+ self.set_state_on_drop_to.set(POISONED);
+ }
}
\ No newline at end of file
diff --git a/sgx_urts/src/event.rs b/sgx_urts/src/event.rs
index 8b1f06e..ac28d26 100644
--- a/sgx_urts/src/event.rs
+++ b/sgx_urts/src/event.rs
@@ -39,7 +39,9 @@
if ret < 0 {
let err = Error::last_os_error().raw_os_error().unwrap_or(0);
if err == libc::ETIMEDOUT {
- self.event.compare_and_swap(-1, 0, Ordering::SeqCst);
+ let _ = self
+ .event
+ .compare_exchange(-1, 0, Ordering::SeqCst, Ordering::SeqCst);
return -1;
}
}
diff --git a/xargo/sgx_tstd/build.rs b/xargo/sgx_tstd/build.rs
index 3b90fc9..8d99763 100644
--- a/xargo/sgx_tstd/build.rs
+++ b/xargo/sgx_tstd/build.rs
@@ -17,6 +17,7 @@
use std::env;
use std::path::Path;
+use std::process::Command;
fn main() {
if cfg!(feature = "backtrace") {
@@ -38,4 +39,68 @@
println!("cargo:rustc-link-search=native={}/lib64", sdk_dir);
println!("cargo:rustc-link-lib=static=sgx_pthread");
}
+
+ // since nightly-2020-11-26 (rustc 2020-11-25), auto_traits replaced
+ // optin_builtin_traits
+ // see https://github.com/rust-lang/rust/commit/810324d1f31eb8d75e8f0044df720652986ef133
+ if let Some(true) = is_min_date("2020-11-25") {
+ println!("cargo:rustc-cfg=enable_auto_traits");
+ }
+
+ // nightly-2021-02-08 (rustc 2021-02-07)
+ // https://github.com/rust-lang/rust/commit/dbdbd30bf2cb0d48c8bbce83c2458592664dbb18
+ if let Some(true) = is_min_date("2021-02-07") {
+ println!("cargo:rustc-cfg=derive_macros");
+ }
+}
+
+// The code below is copied from the crate `version_check`.
+// We vendor it here to keep the build-dependency tree as clean
+// as possible; credit goes to SergioBenitez.
+#[derive(Debug, PartialEq, Eq, Copy, Clone, PartialOrd, Ord)]
+struct Date(u32);
+
+impl Date {
+ fn read() -> Option<Date> {
+ get_version_and_date()
+ .and_then(|(_, date)| date)
+ .and_then(|date| Date::parse(&date))
+ }
+
+ fn parse(date: &str) -> Option<Date> {
+ let ymd: Vec<u32> = date.split("-")
+ .filter_map(|s| s.parse::<u32>().ok())
+ .collect();
+
+ if ymd.len() != 3 {
+ return None
+ }
+
+ let (y, m, d) = (ymd[0], ymd[1], ymd[2]);
+ Some(Date((y << 9) | ((m & 0xF) << 5) | (d & 0x1F)))
+ }
+}
+
+fn get_version_and_date() -> Option<(Option<String>, Option<String>)> {
+ env::var("RUSTC").ok()
+ .and_then(|rustc| Command::new(rustc).arg("--version").output().ok())
+ .or_else(|| Command::new("rustc").arg("--version").output().ok())
+ .and_then(|output| String::from_utf8(output.stdout).ok())
+ .map(|s| version_and_date_from_rustc_version(&s))
+}
+
+fn version_and_date_from_rustc_version(s: &str) -> (Option<String>, Option<String>) {
+ let last_line = s.lines().last().unwrap_or(s);
+ let mut components = last_line.trim().split(" ");
+ let version = components.nth(1);
+ let date = components.filter(|c| c.ends_with(')')).next()
+ .map(|s| s.trim_end().trim_end_matches(")").trim_start().trim_start_matches('('));
+ (version.map(|s| s.to_string()), date.map(|s| s.to_string()))
+}
+
+fn is_min_date(min_date: &str) -> Option<bool> {
+ match (Date::read(), Date::parse(min_date)) {
+ (Some(rustc_date), Some(min_date)) => Some(rustc_date >= min_date),
+ _ => None
+ }
}
diff --git a/xargo/x86_64-unknown-linux-sgx.json b/xargo/x86_64-unknown-linux-sgx.json
index 10d37a7..69b38be 100644
--- a/xargo/x86_64-unknown-linux-sgx.json
+++ b/xargo/x86_64-unknown-linux-sgx.json
@@ -1,7 +1,7 @@
{
"arch": "x86_64",
"cpu": "x86-64",
- "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
+ "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
"dynamic-linking": true,
"env": "sgx",
"exe-allocation-crate": "alloc_system",
@@ -22,7 +22,14 @@
]
},
"relro-level": "full",
- "stack-probes": true,
+ "stack-probes": {
+ "kind": "inline-or-call",
+ "min-llvm-version-for-inline": [
+ 11,
+ 0,
+ 1
+ ]
+ },
"target-c-int-width": "32",
"target-endian": "little",
"target-family": "unix",