Merge pull request #165 from baidu/master
Merge from master
diff --git a/rust-toolchain b/rust-toolchain
index 4390c8a..fb7648c 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2019-08-01
+nightly-2019-10-04
diff --git a/sgx_tstd/src/collections/hash/map.rs b/sgx_tstd/src/collections/hash/map.rs
index 1a37532..ae61a55 100644
--- a/sgx_tstd/src/collections/hash/map.rs
+++ b/sgx_tstd/src/collections/hash/map.rs
@@ -39,7 +39,7 @@
use core::iter::{FromIterator, FusedIterator};
use core::mem::{self, replace};
use core::ops::{Deref, DerefMut, Index};
-use crate::collections::CollectionAllocErr;
+use crate::collections::TryReserveError;
use crate::sys;
use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
@@ -64,7 +64,7 @@
/// provide that capacity, accounting for maximum loading. The raw capacity
/// is always zero or a power of two.
#[inline]
- fn try_raw_capacity(&self, len: usize) -> Result<usize, CollectionAllocErr> {
+ fn try_raw_capacity(&self, len: usize) -> Result<usize, TryReserveError> {
if len == 0 {
Ok(0)
} else {
@@ -74,7 +74,7 @@
let mut raw_cap = len.checked_mul(11)
.map(|l| l / 10)
.and_then(|l| l.checked_next_power_of_two())
- .ok_or(CollectionAllocErr::CapacityOverflow)?;
+ .ok_or(TryReserveError::CapacityOverflow)?;
raw_cap = max(MIN_NONZERO_RAW_CAPACITY, raw_cap);
Ok(raw_cap)
@@ -731,8 +731,8 @@
#[inline]
pub fn reserve(&mut self, additional: usize) {
match self.reserve_internal(additional, Infallible) {
- Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
- Err(CollectionAllocErr::AllocErr) => unreachable!(),
+ Err(TryReserveError::CapacityOverflow) => panic!("capacity overflow"),
+ Err(TryReserveError::AllocError{layout:_, non_exhaustive:_}) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
@@ -746,19 +746,19 @@
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
- pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
self.reserve_internal(additional, Fallible)
}
#[inline]
fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
- -> Result<(), CollectionAllocErr> {
+ -> Result<(), TryReserveError> {
let remaining = self.capacity() - self.len(); // this can't overflow
if remaining < additional {
let min_cap = self.len()
.checked_add(additional)
- .ok_or(CollectionAllocErr::CapacityOverflow)?;
+ .ok_or(TryReserveError::CapacityOverflow)?;
let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
self.try_resize(raw_cap, fallibility)?;
} else if self.table.tag() && remaining <= self.len() {
@@ -781,7 +781,7 @@
&mut self,
new_raw_cap: usize,
fallibility: Fallibility,
- ) -> Result<(), CollectionAllocErr> {
+ ) -> Result<(), TryReserveError> {
assert!(self.table.size() <= new_raw_cap);
assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
@@ -2423,4 +2423,4 @@
-> Drain<'new, &'new str, &'new str> {
d
}
-}
\ No newline at end of file
+}
diff --git a/sgx_tstd/src/collections/hash/table.rs b/sgx_tstd/src/collections/hash/table.rs
index 91afbef..eb8419b 100644
--- a/sgx_tstd/src/collections/hash/table.rs
+++ b/sgx_tstd/src/collections/hash/table.rs
@@ -27,7 +27,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::alloc::{Global, Alloc, Layout, LayoutErr, handle_alloc_error};
-use crate::collections::CollectionAllocErr;
+use crate::collections::TryReserveError;
use core::hash::{BuildHasher, Hash, Hasher};
use core::marker;
use core::mem::{self, size_of, needs_drop};
@@ -690,7 +690,7 @@
unsafe fn new_uninitialized_internal(
capacity: usize,
fallibility: Fallibility,
- ) -> Result<RawTable<K, V>, CollectionAllocErr> {
+ ) -> Result<RawTable<K, V>, TryReserveError> {
if capacity == 0 {
return Ok(RawTable {
size: 0,
@@ -705,9 +705,9 @@
// we just allocate a single array, and then have the subarrays
// point into it.
let (layout, _) = calculate_layout::<K, V>(capacity)?;
- let buffer = Global.alloc(layout).map_err(|e| match fallibility {
+ let buffer = Global.alloc(layout).map_err(|_e| match fallibility {
Infallible => handle_alloc_error(layout),
- Fallible => e,
+ Fallible => TryReserveError::AllocError { layout, non_exhaustive: () },
})?;
Ok(RawTable {
@@ -722,8 +722,8 @@
/// at the very least, set every hash to EMPTY_BUCKET.
unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
match Self::new_uninitialized_internal(capacity, Infallible) {
- Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
- Err(CollectionAllocErr::AllocErr) => unreachable!(),
+ Err(TryReserveError::CapacityOverflow) => panic!("capacity overflow"),
+ Err(TryReserveError::AllocError { layout:_, non_exhaustive:_ }) => unreachable!(),
Ok(table) => { table }
}
}
@@ -747,7 +747,7 @@
fn new_internal(
capacity: usize,
fallibility: Fallibility,
- ) -> Result<RawTable<K, V>, CollectionAllocErr> {
+ ) -> Result<RawTable<K, V>, TryReserveError> {
unsafe {
let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?;
if capacity > 0 {
@@ -760,7 +760,7 @@
/// Tries to create a new raw table from a given capacity. If it cannot allocate,
- /// it returns with AllocErr.
+ /// it returns with TryReserveError::AllocError.
#[inline]
- pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+ pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, TryReserveError> {
Self::new_internal(capacity, Fallible)
}
@@ -769,8 +769,8 @@
#[inline]
pub fn new(capacity: usize) -> RawTable<K, V> {
match Self::new_internal(capacity, Infallible) {
- Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
- Err(CollectionAllocErr::AllocErr) => unreachable!(),
+ Err(TryReserveError::CapacityOverflow) => panic!("capacity overflow"),
+ Err(TryReserveError::AllocError{layout:_, non_exhaustive:_}) => unreachable!(),
Ok(table) => { table }
}
}
diff --git a/sgx_tstd/src/collections/mod.rs b/sgx_tstd/src/collections/mod.rs
index 4c8d085..b023f60 100644
--- a/sgx_tstd/src/collections/mod.rs
+++ b/sgx_tstd/src/collections/mod.rs
@@ -43,7 +43,7 @@
pub use self::hash_map::HashMap;
pub use self::hash_set::HashSet;
-pub use alloc_crate::collections::CollectionAllocErr;
+pub use alloc_crate::collections::TryReserveError;
mod hash;
diff --git a/sgx_tstd/src/lib.rs b/sgx_tstd/src/lib.rs
index 74fe1ea..4791431 100644
--- a/sgx_tstd/src/lib.rs
+++ b/sgx_tstd/src/lib.rs
@@ -56,13 +56,13 @@
#![feature(alloc_error_handler)]
#![feature(panic_unwind)]
-#![feature(__rust_unstable_column)]
+//#![feature(__rust_unstable_column)]
#![feature(allocator_api)]
#![feature(allow_internal_unsafe)]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(array_error_internals)]
-#![feature(bind_by_move_pattern_guards)]
+//#![feature(bind_by_move_pattern_guards)]
#![feature(asm)]
#![feature(box_syntax)]
#![feature(c_variadic)]
@@ -111,7 +111,7 @@
#![feature(unicode_internals)]
#![feature(alloc_layout_extra)]
#![feature(non_exhaustive)]
-#![feature(const_vec_new)]
+//#![feature(const_vec_new)]
#![feature(vec_remove_item)]
#![feature(int_error_matching)]
#![default_lib_allocator]
diff --git a/sgx_tstd/src/prelude/v1.rs b/sgx_tstd/src/prelude/v1.rs
index 5720cb3..bebb750 100644
--- a/sgx_tstd/src/prelude/v1.rs
+++ b/sgx_tstd/src/prelude/v1.rs
@@ -49,7 +49,6 @@
#[doc(no_inline)]
pub use core::prelude::v1::{
- __rust_unstable_column,
asm,
assert,
cfg,
@@ -81,9 +80,7 @@
Clone,
Copy,
Debug,
- Decodable,
Default,
- Encodable,
Eq,
Hash,
Ord,
@@ -91,7 +88,7 @@
PartialOrd,
RustcDecodable,
RustcEncodable,
- bench,
+// bench,
global_allocator,
test,
test_case,