diff --git a/src/doc/rustc/src/codegen-options/index.md b/src/doc/rustc/src/codegen-options/index.md index dbe281be7df74..c638f88057a52 100644 --- a/src/doc/rustc/src/codegen-options/index.md +++ b/src/doc/rustc/src/codegen-options/index.md @@ -464,7 +464,15 @@ machine. Each target has a default base CPU. Individual targets will support different features; this flag lets you control enabling or disabling a feature. Each feature should be prefixed with a `+` to -enable it or `-` to disable it. Separate multiple features with commas. +enable it or `-` to disable it. + +Features from multiple `-C target-feature` options are combined. \ +Multiple features can be specified in a single option by separating them +with commas - `-C target-feature=+x,-y`. \ +If some feature is specified more than once with both `+` and `-`, +then values passed later override values passed earlier. \ +For example, `-C target-feature=+x,-y,+z -Ctarget-feature=-x,+y` +is equivalent to `-C target-feature=-x,+y,+z`. To see the valid options and an example of use, run `rustc --print target-features`. diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index 0f0bd617f439c..bbe80c26dcbf9 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -5,8 +5,7 @@ //! of individual objects while the arena itself is still alive. The benefit //! of an arena is very fast allocation; just a pointer bump. //! -//! This crate implements `TypedArena`, a simple arena that can only hold -//! objects of a single type. +//! This crate implements several kinds of arena. #![doc( html_root_url = "https://doc.rust-lang.org/nightly/", @@ -98,7 +97,13 @@ impl TypedArenaChunk { } } +// The arenas start with PAGE-sized chunks, and then each new chunk is twice as +// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon +// we stop growing. This scales well, from arenas that are barely used up to +// arenas that are used for 100s of MiBs. Note also that the chosen sizes match +// the usual sizes of pages and huge pages on Linux. const PAGE: usize = 4096; +const HUGE_PAGE: usize = 2 * 1024 * 1024; impl Default for TypedArena { /// Creates a new `TypedArena`. @@ -211,6 +216,9 @@ impl TypedArena { #[cold] fn grow(&self, n: usize) { unsafe { + // We need the element size in to convert chunk sizes (ranging from + // PAGE to HUGE_PAGE bytes) to element counts. + let elem_size = cmp::max(1, mem::size_of::()); let mut chunks = self.chunks.borrow_mut(); let (chunk, mut new_capacity); if let Some(last_chunk) = chunks.last_mut() { @@ -221,18 +229,20 @@ impl TypedArena { self.end.set(last_chunk.end()); return; } else { + // If the previous chunk's capacity is less than HUGE_PAGE + // bytes, then this chunk will be least double the previous + // chunk's size. new_capacity = last_chunk.storage.capacity(); - loop { + if new_capacity < HUGE_PAGE / elem_size { new_capacity = new_capacity.checked_mul(2).unwrap(); - if new_capacity >= currently_used_cap + n { - break; - } } } } else { - let elem_size = cmp::max(1, mem::size_of::()); - new_capacity = cmp::max(n, PAGE / elem_size); + new_capacity = PAGE / elem_size; } + // Also ensure that this chunk can fit `n`. + new_capacity = cmp::max(n, new_capacity); + chunk = TypedArenaChunk::::new(new_capacity); self.ptr.set(chunk.start()); self.end.set(chunk.end()); @@ -347,17 +357,20 @@ impl DroplessArena { self.end.set(last_chunk.end()); return; } else { + // If the previous chunk's capacity is less than HUGE_PAGE + // bytes, then this chunk will be least double the previous + // chunk's size. 
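As an aside on the chunk-growth policy described in the comments above (start at `PAGE` bytes, double each new chunk until `HUGE_PAGE` bytes, and always make room for the `n` elements that triggered the growth), the sizing rule can be sketched in isolation. This is only an illustration of the policy, not the arena's actual code; `next_chunk_capacity` is a made-up helper, and the constants mirror the ones introduced in this change.

```rust
// Illustrative sketch of the chunk-growth policy: capacities are in elements.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

fn next_chunk_capacity(prev_capacity: Option<usize>, elem_size: usize, n: usize) -> usize {
    let elem_size = elem_size.max(1);
    let mut new_capacity = match prev_capacity {
        // Double the previous chunk until chunks reach HUGE_PAGE bytes.
        Some(prev) if prev < HUGE_PAGE / elem_size => prev.checked_mul(2).unwrap(),
        Some(prev) => prev,
        // The first chunk starts at one page worth of elements.
        None => PAGE / elem_size,
    };
    // Also ensure that this chunk can fit `n`.
    new_capacity = new_capacity.max(n);
    new_capacity
}

fn main() {
    assert_eq!(next_chunk_capacity(None, 8, 1), 512); // 4096 / 8
    assert_eq!(next_chunk_capacity(Some(512), 8, 1), 1024); // doubled
    assert_eq!(next_chunk_capacity(Some(262_144), 8, 1), 262_144); // capped at 2 MiB worth
    assert_eq!(next_chunk_capacity(None, 8, 10_000), 10_000); // still fits `n`
}
```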
new_capacity = last_chunk.storage.capacity(); - loop { + if new_capacity < HUGE_PAGE { new_capacity = new_capacity.checked_mul(2).unwrap(); - if new_capacity >= used_bytes + needed_bytes { - break; - } } } } else { - new_capacity = cmp::max(needed_bytes, PAGE); + new_capacity = PAGE; } + // Also ensure that this chunk can fit `needed_bytes`. + new_capacity = cmp::max(needed_bytes, new_capacity); + chunk = TypedArenaChunk::::new(new_capacity); self.ptr.set(chunk.start()); self.end.set(chunk.end()); diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index bb68184c8dd77..d74df82bddd9d 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -4,47 +4,182 @@ use crate::ops::{self, Add, Sub, Try}; use super::{FusedIterator, TrustedLen}; -/// Objects that can be stepped over in both directions. +/// Objects that have a notion of *successor* and *predecessor* operations. /// -/// The `steps_between` function provides a way to efficiently compare -/// two `Step` objects. -#[unstable( - feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168" -)] -pub trait Step: Clone + PartialOrd + Sized { - /// Returns the number of steps between two step objects. The count is - /// inclusive of `start` and exclusive of `end`. - /// - /// Returns `None` if it is not possible to calculate `steps_between` - /// without overflow. +/// The *successor* operation moves towards values that compare greater. +/// The *predecessor* operation moves towards values that compare lesser. +/// +/// # Safety +/// +/// This trait is `unsafe` because its implementation must be correct for +/// the safety of `unsafe trait TrustedLen` implementations, and the results +/// of using this trait can otherwise be trusted by `unsafe` code to be correct +/// and fulfill the listed obligations. +#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] +pub unsafe trait Step: Clone + PartialOrd + Sized { + /// Returns the number of *successor* steps required to get from `start` to `end`. + /// + /// Returns `None` if the number of steps would overflow `usize` + /// (or is infinite, or if `end` would never be reached). + /// + /// # Invariants + /// + /// For any `a`, `b`, and `n`: + /// + /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)` + /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&a, n) == Some(a)` + /// * `steps_between(&a, &b) == Some(n)` only if `a <= b` + /// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b` + /// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`; + /// this is the case wheen it would require more than `usize::MAX` steps to get to `b` + /// * `steps_between(&a, &b) == None` if `a > b` fn steps_between(start: &Self, end: &Self) -> Option; - /// Replaces this step with `1`, returning a clone of itself. + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, returns `None`. /// - /// The output of this method should always be greater than the output of replace_zero. 
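The invariants listed above can be checked with concrete values. The snippet below is only an illustration; it needs a nightly compiler with the unstable `step_trait` and `step_trait_ext` features touched by this change, and the numbers are arbitrary.

```rust
#![feature(step_trait, step_trait_ext)]
use std::iter::Step;

fn main() {
    // steps_between and forward_checked/backward_checked agree with each other:
    // going forward from `a` by `n` reaches `b`, and backward from `b` by `n` reaches `a`.
    assert_eq!(Step::steps_between(&3_u8, &250_u8), Some(247));
    assert_eq!(Step::forward_checked(3_u8, 247), Some(250_u8));
    assert_eq!(Step::backward_checked(250_u8, 247), Some(3_u8));

    // `a > b` means there is no (non-negative) number of successor steps.
    assert_eq!(Step::steps_between(&250_u8, &3_u8), None);

    // Stepping past the end of the type's range yields None.
    assert_eq!(Step::forward_checked(250_u8, 10), None);
}
```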
- fn replace_one(&mut self) -> Self; + /// # Invariants + /// + /// For any `a`, `n`, and `m`: + /// + /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))` + /// + /// For any `a`, `n`, and `m` where `n + m` does not overflow: + /// + /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)` + /// + /// For any `a` and `n`: + /// + /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))` + /// * Corollary: `Step::forward_checked(&a, 0) == Some(a)` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn forward_checked(start: Self, count: usize) -> Option; - /// Replaces this step with `0`, returning a clone of itself. + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, + /// this function is allowed to panic, wrap, or saturate. + /// The suggested behavior is to panic when debug assertions are enabled, + /// and to wrap or saturate otherwise. /// - /// The output of this method should always be less than the output of replace_one. - fn replace_zero(&mut self) -> Self; + /// Unsafe code should not rely on the correctness of behavior after overflow. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))` + /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))` + /// * Corollary: `Step::forward(a, 0) == a` + /// * `Step::forward(a, n) >= a` + /// * `Step::backward(Step::forward(a, n), n) == a` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn forward(start: Self, count: usize) -> Self { + Step::forward_checked(start, count).expect("overflow in `Step::forward`") + } - /// Adds one to this step, returning the result. - fn add_one(&self) -> Self; + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// # Safety + /// + /// It is undefined behavior for this operation to overflow the + /// range of values supported by `Self`. If you cannot guarantee that this + /// will not overflow, use `forward` or `forward_checked` instead. + /// + /// # Invariants + /// + /// For any `a`: + /// + /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)` + /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`, + /// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`. + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)` + #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")] + unsafe fn forward_unchecked(start: Self, count: usize) -> Self { + Step::forward(start, count) + } - /// Subtracts one to this step, returning the result. - fn sub_one(&self) -> Self; + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, returns `None`. 
+ /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`: + /// + /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))` + /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }` + /// + /// For any `a` and `n`: + /// + /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))` + /// * Corollary: `Step::backward_checked(&a, 0) == Some(a)` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn backward_checked(start: Self, count: usize) -> Option; - /// Adds a `usize`, returning `None` on overflow. - fn add_usize(&self, n: usize) -> Option; + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, + /// this function is allowed to panic, wrap, or saturate. + /// The suggested behavior is to panic when debug assertions are enabled, + /// and to wrap or saturate otherwise. + /// + /// Unsafe code should not rely on the correctness of behavior after overflow. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))` + /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))` + /// * Corollary: `Step::backward(a, 0) == a` + /// * `Step::backward(a, n) <= a` + /// * `Step::forward(Step::backward(a, n), n) == a` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn backward(start: Self, count: usize) -> Self { + Step::backward_checked(start, count).expect("overflow in `Step::backward`") + } - /// Subtracts a `usize`, returning `None` on underflow. - fn sub_usize(&self, n: usize) -> Option { - // this default implementation makes the addition of `sub_usize` a non-breaking change - let _ = n; - unimplemented!() + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times. + /// + /// # Safety + /// + /// It is undefined behavior for this operation to overflow the + /// range of values supported by `Self`. If you cannot guarantee that this + /// will not overflow, use `backward` or `backward_checked` instead. + /// + /// # Invariants + /// + /// For any `a`: + /// + /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)` + /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`, + /// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`. + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)` + #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")] + unsafe fn backward_unchecked(start: Self, count: usize) -> Self { + Step::backward(start, count) } } @@ -52,127 +187,218 @@ pub trait Step: Clone + PartialOrd + Sized { macro_rules! 
step_identical_methods { () => { #[inline] - fn replace_one(&mut self) -> Self { - mem::replace(self, 1) + unsafe fn forward_unchecked(start: Self, n: usize) -> Self { + start.unchecked_add(n as Self) } #[inline] - fn replace_zero(&mut self) -> Self { - mem::replace(self, 0) + unsafe fn backward_unchecked(start: Self, n: usize) -> Self { + start.unchecked_sub(n as Self) } #[inline] - fn add_one(&self) -> Self { - Add::add(*self, 1) + fn forward(start: Self, n: usize) -> Self { + // In debug builds, trigger a panic on overflow. + // This should optimize completely out in release builds. + if Self::forward_checked(start, n).is_none() { + let _ = Add::add(Self::MAX, 1); + } + // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`. + start.wrapping_add(n as Self) } #[inline] - fn sub_one(&self) -> Self { - Sub::sub(*self, 1) + fn backward(start: Self, n: usize) -> Self { + // In debug builds, trigger a panic on overflow. + // This should optimize completely out in release builds. + if Self::backward_checked(start, n).is_none() { + let _ = Sub::sub(Self::MIN, 1); + } + // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`. + start.wrapping_sub(n as Self) } }; } -macro_rules! step_impl_unsigned { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168")] - impl Step for $t { - #[inline] - fn steps_between(start: &$t, end: &$t) -> Option { - if *start < *end { - usize::try_from(*end - *start).ok() - } else { - Some(0) +macro_rules! step_integer_impls { + { + narrower than or same width as usize: + $( [ $u_narrower:ident $i_narrower:ident ] ),+; + wider than usize: + $( [ $u_wider:ident $i_wider:ident ] ),+; + } => { + $( + #[allow(unreachable_patterns)] + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $u_narrower { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + // This relies on $u_narrower <= usize + Some((*end - *start) as usize) + } else { + None + } } - } - #[inline] - #[allow(unreachable_patterns)] - fn add_usize(&self, n: usize) -> Option { - match <$t>::try_from(n) { - Ok(n_as_t) => self.checked_add(n_as_t), - Err(_) => None, + #[inline] + fn forward_checked(start: Self, n: usize) -> Option { + match Self::try_from(n) { + Ok(n) => start.checked_add(n), + Err(_) => None, // if n is out of range, `unsigned_start + n` is too + } + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option { + match Self::try_from(n) { + Ok(n) => start.checked_sub(n), + Err(_) => None, // if n is out of range, `unsigned_start - n` is too + } } } - #[inline] #[allow(unreachable_patterns)] - fn sub_usize(&self, n: usize) -> Option { - match <$t>::try_from(n) { - Ok(n_as_t) => self.checked_sub(n_as_t), - Err(_) => None, + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $i_narrower { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + // This relies on $i_narrower <= usize + // + // Casting to isize extends the width but preserves the sign. + // Use wrapping_sub in isize space and cast to usize to compute + // the difference that may not fit inside the range of isize. + Some((*end as isize).wrapping_sub(*start as isize) as usize) + } else { + None + } } - } - step_identical_methods!(); - } - )*) -} -macro_rules! 
step_impl_signed { - ($( [$t:ty : $unsigned:ty] )*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168")] - impl Step for $t { - #[inline] - fn steps_between(start: &$t, end: &$t) -> Option { - if *start < *end { - // Use .wrapping_sub and cast to unsigned to compute the - // difference that may not fit inside the range of $t. - usize::try_from(end.wrapping_sub(*start) as $unsigned).ok() - } else { - Some(0) + #[inline] + fn forward_checked(start: Self, n: usize) -> Option { + match $u_narrower::try_from(n) { + Ok(n) => { + // Wrapping handles cases like + // `Step::forward(-120_i8, 200) == Some(80_i8)`, + // even though 200 is out of range for i8. + let wrapped = start.wrapping_add(n as Self); + if wrapped >= start { + Some(wrapped) + } else { + None // Addition overflowed + } + } + // If n is out of range of e.g. u8, + // then it is bigger than the entire range for i8 is wide + // so `any_i8 + n` necessarily overflows i8. + Err(_) => None, + } + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option { + match $u_narrower::try_from(n) { + Ok(n) => { + // Wrapping handles cases like + // `Step::forward(-120_i8, 200) == Some(80_i8)`, + // even though 200 is out of range for i8. + let wrapped = start.wrapping_sub(n as Self); + if wrapped <= start { + Some(wrapped) + } else { + None // Subtraction overflowed + } + } + // If n is out of range of e.g. u8, + // then it is bigger than the entire range for i8 is wide + // so `any_i8 - n` necessarily overflows i8. + Err(_) => None, + } } } + )+ - #[inline] + $( #[allow(unreachable_patterns)] - fn add_usize(&self, n: usize) -> Option { - match <$unsigned>::try_from(n) { - Ok(n_as_unsigned) => { - // Wrapping in unsigned space handles cases like - // `-120_i8.add_usize(200) == Some(80_i8)`, - // even though 200_usize is out of range for i8. - let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t; - if wrapped >= *self { - Some(wrapped) - } else { - None // Addition overflowed - } + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $u_wider { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + usize::try_from(*end - *start).ok() + } else { + None } - Err(_) => None, + } + + #[inline] + fn forward_checked(start: Self, n: usize) -> Option { + start.checked_add(n as Self) + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option { + start.checked_sub(n as Self) } } - #[inline] #[allow(unreachable_patterns)] - fn sub_usize(&self, n: usize) -> Option { - match <$unsigned>::try_from(n) { - Ok(n_as_unsigned) => { - // Wrapping in unsigned space handles cases like - // `80_i8.sub_usize(200) == Some(-120_i8)`, - // even though 200_usize is out of range for i8. - let wrapped = (*self as $unsigned).wrapping_sub(n_as_unsigned) as $t; - if wrapped <= *self { - Some(wrapped) - } else { - None // Subtraction underflowed + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $i_wider { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + match end.checked_sub(*start) { + Some(result) => usize::try_from(result).ok(), + // If the difference is too big for e.g. i128, + // it's also gonna be too big for usize with fewer bits. 
+ None => None, } + } else { + None } - Err(_) => None, + } + + #[inline] + fn forward_checked(start: Self, n: usize) -> Option { + start.checked_add(n as Self) + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option { + start.checked_sub(n as Self) } } + )+ + }; +} - step_identical_methods!(); - } - )*) +#[cfg(target_pointer_width = "64")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize]; + wider than usize: [u128 i128]; } -step_impl_unsigned!(usize u8 u16 u32 u64 u128); -step_impl_signed!([isize: usize][i8: u8][i16: u16]); -step_impl_signed!([i32: u32][i64: u64][i128: u128]); +#[cfg(target_pointer_width = "32")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize]; + wider than usize: [u64 i64], [u128 i128]; +} + +#[cfg(target_pointer_width = "16")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize]; + wider than usize: [u32 i32], [u64 i64], [u128 i128]; +} macro_rules! range_exact_iter_impl { ($($t:ty)*) => ($( @@ -188,20 +414,6 @@ macro_rules! range_incl_exact_iter_impl { )*) } -macro_rules! range_trusted_len_impl { - ($($t:ty)*) => ($( - #[unstable(feature = "trusted_len", issue = "37572")] - unsafe impl TrustedLen for ops::Range<$t> { } - )*) -} - -macro_rules! range_incl_trusted_len_impl { - ($($t:ty)*) => ($( - #[unstable(feature = "trusted_len", issue = "37572")] - unsafe impl TrustedLen for ops::RangeInclusive<$t> { } - )*) -} - #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for ops::Range { type Item = A; @@ -209,16 +421,12 @@ impl Iterator for ops::Range { #[inline] fn next(&mut self) -> Option { if self.start < self.end { - // We check for overflow here, even though it can't actually - // happen. Adding this check does however help llvm vectorize loops - // for some ranges that don't get vectorized otherwise, - // and this won't actually result in an extra check in an optimized build. - if let Some(mut n) = self.start.add_usize(1) { - mem::swap(&mut n, &mut self.start); - Some(n) - } else { - None - } + // SAFETY: just checked precondition + // We use the unchecked version here, because + // this helps LLVM vectorize loops for some ranges + // that don't get vectorized otherwise. + let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) }; + Some(mem::replace(&mut self.start, n)) } else { None } @@ -226,17 +434,19 @@ impl Iterator for ops::Range { #[inline] fn size_hint(&self) -> (usize, Option) { - match Step::steps_between(&self.start, &self.end) { - Some(hint) => (hint, Some(hint)), - None => (usize::MAX, None), + if self.start < self.end { + let hint = Step::steps_between(&self.start, &self.end); + (hint.unwrap_or(usize::MAX), hint) + } else { + (0, Some(0)) } } #[inline] fn nth(&mut self, n: usize) -> Option { - if let Some(plus_n) = self.start.add_usize(n) { + if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) { if plus_n < self.end { - self.start = plus_n.add_one(); + self.start = Step::forward(plus_n.clone(), 1); return Some(plus_n); } } @@ -262,25 +472,42 @@ impl Iterator for ops::Range { } // These macros generate `ExactSizeIterator` impls for various range types. -// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded -// because they cannot guarantee having a length <= usize::MAX, which is -// required by ExactSizeIterator. 
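For illustration, the `size_hint` implementation above reports an exact length when it fits in `usize`, `(0, Some(0))` for an empty or reversed range, and saturates the lower bound with an unknown upper bound otherwise. The example below assumes a target where `usize` is at most 64 bits wide.

```rust
fn main() {
    // An in-range, non-empty range reports an exact hint.
    assert_eq!((3_u8..250).size_hint(), (247, Some(247)));

    // An empty (or reversed) range reports (0, Some(0)).
    assert_eq!((250_u8..3).size_hint(), (0, Some(0)));

    // When the length does not fit in usize, the lower bound saturates
    // and the upper bound is unknown.
    assert_eq!((0_i128..i128::MAX).size_hint(), (usize::MAX, None));
}
```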
-range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32); -range_incl_exact_iter_impl!(u8 u16 i8 i16); - -// These macros generate `TrustedLen` impls. // -// They need to guarantee that .size_hint() is either exact, or that -// the upper bound is None when it does not fit the type limits. -range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); -range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); +// * `ExactSizeIterator::len` is required to always return an exact `usize`, +// so no range can be longer than `usize::MAX`. +// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`. +// For integer types in `RangeInclusive<_>` +// this is the case for types *strictly narrower* than `usize` +// since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`. +range_exact_iter_impl! { + usize u8 u16 + isize i8 i16 + + // These are incorect per the reasoning above, + // but removing them would be a breaking change as they were stabilized in Rust 1.0.0. + // So e.g. `(0..66_000_u32).len()` for example will compile without error or warnings + // on 16-bit platforms, but continue to give a wrong result. + u32 + i32 +} +range_incl_exact_iter_impl! { + u8 + i8 + + // These are incorect per the reasoning above, + // but removing them would be a breaking change as they were stabilized in Rust 1.26.0. + // So e.g. `(0..=u16::MAX).len()` for example will compile without error or warnings + // on 16-bit platforms, but continue to give a wrong result. + u16 + i16 +} #[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for ops::Range { #[inline] fn next_back(&mut self) -> Option { if self.start < self.end { - self.end = self.end.sub_one(); + self.end = Step::backward(self.end.clone(), 1); Some(self.end.clone()) } else { None @@ -289,9 +516,9 @@ impl DoubleEndedIterator for ops::Range { #[inline] fn nth_back(&mut self, n: usize) -> Option { - if let Some(minus_n) = self.end.sub_usize(n) { + if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) { if minus_n > self.start { - self.end = minus_n.sub_one(); + self.end = Step::backward(minus_n, 1); return Some(self.end.clone()); } } @@ -301,6 +528,9 @@ impl DoubleEndedIterator for ops::Range { } } +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for ops::Range {} + #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ops::Range {} @@ -310,9 +540,8 @@ impl Iterator for ops::RangeFrom { #[inline] fn next(&mut self) -> Option { - let mut n = self.start.add_one(); - mem::swap(&mut n, &mut self.start); - Some(n) + let n = Step::forward(self.start.clone(), 1); + Some(mem::replace(&mut self.start, n)) } #[inline] @@ -322,8 +551,16 @@ impl Iterator for ops::RangeFrom { #[inline] fn nth(&mut self, n: usize) -> Option { - let plus_n = self.start.add_usize(n).expect("overflow in RangeFrom::nth"); - self.start = plus_n.add_one(); + // If we would jump over the maximum value, panic immediately. + // This is consistent with behavior before the Step redesign, + // even though it's inconsistent with n `next` calls. + // To get consistent behavior, change it to use `forward` instead. + // This change should go through FCP separately to the redesign, so is for now left as a + // FIXME: make this consistent + let plus_n = + Step::forward_checked(self.start.clone(), n).expect("overflow in RangeFrom::nth"); + // The final step should always be debug-checked. 
+ self.start = Step::forward(plus_n.clone(), 1); Some(plus_n) } } @@ -345,7 +582,7 @@ impl Iterator for ops::RangeInclusive { } let is_iterating = self.start < self.end; Some(if is_iterating { - let n = self.start.add_one(); + let n = Step::forward(self.start.clone(), 1); mem::replace(&mut self.start, n) } else { self.exhausted = true; @@ -371,12 +608,12 @@ impl Iterator for ops::RangeInclusive { return None; } - if let Some(plus_n) = self.start.add_usize(n) { + if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) { use crate::cmp::Ordering::*; match plus_n.partial_cmp(&self.end) { Some(Less) => { - self.start = plus_n.add_one(); + self.start = Step::forward(plus_n.clone(), 1); return Some(plus_n); } Some(Equal) => { @@ -407,7 +644,7 @@ impl Iterator for ops::RangeInclusive { let mut accum = init; while self.start < self.end { - let n = self.start.add_one(); + let n = Step::forward(self.start.clone(), 1); let n = mem::replace(&mut self.start, n); accum = f(accum, n)?; } @@ -446,7 +683,7 @@ impl DoubleEndedIterator for ops::RangeInclusive { } let is_iterating = self.start < self.end; Some(if is_iterating { - let n = self.end.sub_one(); + let n = Step::backward(self.end.clone(), 1); mem::replace(&mut self.end, n) } else { self.exhausted = true; @@ -460,12 +697,12 @@ impl DoubleEndedIterator for ops::RangeInclusive { return None; } - if let Some(minus_n) = self.end.sub_usize(n) { + if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) { use crate::cmp::Ordering::*; match minus_n.partial_cmp(&self.start) { Some(Greater) => { - self.end = minus_n.sub_one(); + self.end = Step::backward(minus_n.clone(), 1); return Some(minus_n); } Some(Equal) => { @@ -496,7 +733,7 @@ impl DoubleEndedIterator for ops::RangeInclusive { let mut accum = init; while self.start < self.end { - let n = self.end.sub_one(); + let n = Step::backward(self.end.clone(), 1); let n = mem::replace(&mut self.end, n); accum = f(accum, n)?; } @@ -511,5 +748,8 @@ impl DoubleEndedIterator for ops::RangeInclusive { } } +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for ops::RangeInclusive {} + #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ops::RangeInclusive {} diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index a259e293b0c1a..9b4872f5d9f7a 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -749,6 +749,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer addition. Computes `self + rhs, assuming overflow +cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT), +"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_add(self, rhs: Self) -> Self { + intrinsics::unchecked_add(self, rhs) + } + } + doc_comment! { concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. @@ -774,6 +791,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer subtraction. Computes `self - rhs, assuming overflow +cannot occur. 
This results in undefined behavior when `self - rhs > ", stringify!($SelfT), +"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_sub(self, rhs: Self) -> Self { + intrinsics::unchecked_sub(self, rhs) + } + } + doc_comment! { concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow occurred. @@ -799,6 +833,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer multiplication. Computes `self * rhs, assuming overflow +cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT), +"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_mul(self, rhs: Self) -> Self { + intrinsics::unchecked_mul(self, rhs) + } + } + doc_comment! { concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0` or the division results in overflow. @@ -2936,6 +2987,23 @@ assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeat } } + doc_comment! { + concat!("Unchecked integer addition. Computes `self + rhs, assuming overflow +cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT), +"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_add(self, rhs: Self) -> Self { + intrinsics::unchecked_add(self, rhs) + } + } + doc_comment! { concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. @@ -2959,6 +3027,23 @@ assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer subtraction. Computes `self - rhs, assuming overflow +cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT), +"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_sub(self, rhs: Self) -> Self { + intrinsics::unchecked_sub(self, rhs) + } + } + doc_comment! { concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow occurred. @@ -2982,6 +3067,23 @@ assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer multiplication. Computes `self * rhs, assuming overflow +cannot occur. 
This results in undefined behavior when `self * rhs > ", stringify!($SelfT), +"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_mul(self, rhs: Self) -> Self { + intrinsics::unchecked_mul(self, rhs) + } + } + doc_comment! { concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`. diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index 7da02b11676ab..52cf068f0a567 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -2139,6 +2139,24 @@ fn test_range_inclusive_nth_back() { assert_eq!(ExactSizeIterator::is_empty(&r), true); } +#[test] +fn test_range_len() { + assert_eq!((0..10_u8).len(), 10); + assert_eq!((9..10_u8).len(), 1); + assert_eq!((10..10_u8).len(), 0); + assert_eq!((11..10_u8).len(), 0); + assert_eq!((100..10_u8).len(), 0); +} + +#[test] +fn test_range_inclusive_len() { + assert_eq!((0..=10_u8).len(), 11); + assert_eq!((9..=10_u8).len(), 2); + assert_eq!((10..=10_u8).len(), 1); + assert_eq!((11..=10_u8).len(), 0); + assert_eq!((100..=10_u8).len(), 0); +} + #[test] fn test_range_step() { #![allow(deprecated)] @@ -2509,42 +2527,91 @@ fn test_chain_fold() { } #[test] -fn test_step_replace_unsigned() { - let mut x = 4u32; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_steps_between() { + assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize)); + assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize)); + assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize)); + assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize)); + assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize)); + assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize)); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + // Skip u64/i64 to avoid differences with 32-bit vs 64-bit platforms + + assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize)); + assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize)); + if cfg!(target_pointer_width = "64") { + assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX)); + } + assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None); + assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None); + assert_eq!( + Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,), + None, + ); } #[test] -fn test_step_replace_signed() { - let mut x = 4i32; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_step_forward() { + assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8)); + assert_eq!(Step::forward_checked(252_u8, 200_usize), None); + assert_eq!(Step::forward_checked(0_u8, 256_usize), None); + assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8)); + assert_eq!(Step::forward_checked(-110_i8, 248_usize), None); + assert_eq!(Step::forward_checked(-126_i8, 256_usize), None); + + assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16)); + assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX)); + assert_eq!(Step::forward_checked(36_u16, 65500_usize), None); + assert_eq!(Step::forward_checked(-110_i16, 200_usize), 
Some(90_i16)); + assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16)); + assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None); + assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128)); + assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128)); + assert_eq!( + Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize), + Some(u128::MAX), + ); + assert_eq!( + Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize), + None + ); + assert_eq!( + Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize), + Some(i128::MAX), + ); + assert_eq!( + Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize), + None + ); } #[test] -fn test_step_replace_no_between() { - let mut x = 4u128; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_step_backward() { + assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8)); + assert_eq!(Step::backward_checked(100_u8, 200_usize), None); + assert_eq!(Step::backward_checked(255_u8, 256_usize), None); + assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8)); + assert_eq!(Step::backward_checked(110_i8, 248_usize), None); + assert_eq!(Step::backward_checked(127_i8, 256_usize), None); + + assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16)); + assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16)); + assert_eq!(Step::backward_checked(10_u16, 11_usize), None); + assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16)); + assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16)); + assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None); + assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128)); + assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128)); + assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128)); + assert_eq!(Step::backward_checked(10_u128, 11_usize), None); + assert_eq!( + Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize), + Some(i128::MIN) + ); } #[test] diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index e7d36d327cd89..d636542d699f9 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -22,6 +22,7 @@ #![feature(slice_partition_at_index)] #![feature(specialization)] #![feature(step_trait)] +#![feature(step_trait_ext)] #![feature(str_internals)] #![feature(test)] #![feature(trusted_len)] diff --git a/src/librustc_ast/util/literal.rs b/src/librustc_ast/util/literal.rs index 1b17f343a6d67..4428d09902b92 100644 --- a/src/librustc_ast/util/literal.rs +++ b/src/librustc_ast/util/literal.rs @@ -6,8 +6,7 @@ use crate::tokenstream::TokenTree; use rustc_data_structures::sync::Lrc; use rustc_lexer::unescape::{unescape_byte, unescape_char}; -use rustc_lexer::unescape::{unescape_byte_str, unescape_str}; -use rustc_lexer::unescape::{unescape_raw_byte_str, unescape_raw_str}; +use rustc_lexer::unescape::{unescape_byte_literal, unescape_literal, Mode}; use rustc_span::symbol::{kw, sym, Symbol}; use rustc_span::Span; @@ -59,45 +58,53 @@ impl LitKind { // new symbol 
because the string in the LitKind is different to the // string in the token. let s = symbol.as_str(); - let symbol = if s.contains(&['\\', '\r'][..]) { - let mut buf = String::with_capacity(s.len()); - let mut error = Ok(()); - unescape_str(&s, &mut |_, unescaped_char| match unescaped_char { - Ok(c) => buf.push(c), - Err(_) => error = Err(LitError::LexerError), - }); - error?; - Symbol::intern(&buf) - } else { - symbol - }; + let symbol = + if s.contains(&['\\', '\r'][..]) { + let mut buf = String::with_capacity(s.len()); + let mut error = Ok(()); + unescape_literal(&s, Mode::Str, &mut |_, unescaped_char| { + match unescaped_char { + Ok(c) => buf.push(c), + Err(_) => error = Err(LitError::LexerError), + } + }); + error?; + Symbol::intern(&buf) + } else { + symbol + }; LitKind::Str(symbol, ast::StrStyle::Cooked) } token::StrRaw(n) => { // Ditto. let s = symbol.as_str(); - let symbol = if s.contains('\r') { - let mut buf = String::with_capacity(s.len()); - let mut error = Ok(()); - unescape_raw_str(&s, &mut |_, unescaped_char| match unescaped_char { - Ok(c) => buf.push(c), - Err(_) => error = Err(LitError::LexerError), - }); - error?; - buf.shrink_to_fit(); - Symbol::intern(&buf) - } else { - symbol - }; + let symbol = + if s.contains('\r') { + let mut buf = String::with_capacity(s.len()); + let mut error = Ok(()); + unescape_literal(&s, Mode::RawStr, &mut |_, unescaped_char| { + match unescaped_char { + Ok(c) => buf.push(c), + Err(_) => error = Err(LitError::LexerError), + } + }); + error?; + buf.shrink_to_fit(); + Symbol::intern(&buf) + } else { + symbol + }; LitKind::Str(symbol, ast::StrStyle::Raw(n)) } token::ByteStr => { let s = symbol.as_str(); let mut buf = Vec::with_capacity(s.len()); let mut error = Ok(()); - unescape_byte_str(&s, &mut |_, unescaped_byte| match unescaped_byte { - Ok(c) => buf.push(c), - Err(_) => error = Err(LitError::LexerError), + unescape_byte_literal(&s, Mode::ByteStr, &mut |_, unescaped_byte| { + match unescaped_byte { + Ok(c) => buf.push(c), + Err(_) => error = Err(LitError::LexerError), + } }); error?; buf.shrink_to_fit(); @@ -108,9 +115,11 @@ impl LitKind { let bytes = if s.contains('\r') { let mut buf = Vec::with_capacity(s.len()); let mut error = Ok(()); - unescape_raw_byte_str(&s, &mut |_, unescaped_byte| match unescaped_byte { - Ok(c) => buf.push(c), - Err(_) => error = Err(LitError::LexerError), + unescape_byte_literal(&s, Mode::RawByteStr, &mut |_, unescaped_byte| { + match unescaped_byte { + Ok(c) => buf.push(c), + Err(_) => error = Err(LitError::LexerError), + } }); error?; buf.shrink_to_fit(); diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 913ccf8e68089..6847b175e60eb 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -1138,6 +1138,16 @@ pub fn catch_fatal_errors R, R>(f: F) -> Result }) } +/// Variant of `catch_fatal_errors` for the `interface::Result` return type +/// that also computes the exit code. +pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 { + let result = catch_fatal_errors(f).and_then(|result| result); + match result { + Ok(()) => EXIT_SUCCESS, + Err(_) => EXIT_FAILURE, + } +} + lazy_static! { static ref DEFAULT_HOOK: Box) + Sync + Send + 'static> = { let hook = panic::take_hook(); @@ -1228,12 +1238,12 @@ pub fn init_rustc_env_logger() { env_logger::init_from_env("RUSTC_LOG"); } -pub fn main() { +pub fn main() -> ! 
{ let start = Instant::now(); init_rustc_env_logger(); let mut callbacks = TimePassesCallbacks::default(); install_ice_hook(); - let result = catch_fatal_errors(|| { + let exit_code = catch_with_exit_code(|| { let args = env::args_os() .enumerate() .map(|(i, arg)| { @@ -1246,13 +1256,8 @@ pub fn main() { }) .collect::>(); run_compiler(&args, &mut callbacks, None, None) - }) - .and_then(|result| result); - let exit_code = match result { - Ok(_) => EXIT_SUCCESS, - Err(_) => EXIT_FAILURE, - }; + }); // The extra `\t` is necessary to align this label with the others. print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed()); - process::exit(exit_code); + process::exit(exit_code) } diff --git a/src/librustc_index/vec.rs b/src/librustc_index/vec.rs index a84f89c7cd950..67dcea58cf82b 100644 --- a/src/librustc_index/vec.rs +++ b/src/librustc_index/vec.rs @@ -65,7 +65,7 @@ impl Idx for u32 { /// `u32::MAX`. You can also customize things like the `Debug` impl, /// what traits are derived, and so forth via the macro. #[macro_export] -#[allow_internal_unstable(step_trait, rustc_attrs)] +#[allow_internal_unstable(step_trait, step_trait_ext, rustc_attrs)] macro_rules! newtype_index { // ---- public rules ---- @@ -181,7 +181,7 @@ macro_rules! newtype_index { } } - impl ::std::iter::Step for $type { + unsafe impl ::std::iter::Step for $type { #[inline] fn steps_between(start: &Self, end: &Self) -> Option { ::steps_between( @@ -191,33 +191,13 @@ macro_rules! newtype_index { } #[inline] - fn replace_one(&mut self) -> Self { - ::std::mem::replace(self, Self::from_u32(1)) + fn forward_checked(start: Self, u: usize) -> Option { + Self::index(start).checked_add(u).map(Self::from_usize) } #[inline] - fn replace_zero(&mut self) -> Self { - ::std::mem::replace(self, Self::from_u32(0)) - } - - #[inline] - fn add_one(&self) -> Self { - Self::from_usize(Self::index(*self) + 1) - } - - #[inline] - fn sub_one(&self) -> Self { - Self::from_usize(Self::index(*self) - 1) - } - - #[inline] - fn add_usize(&self, u: usize) -> Option { - Self::index(*self).checked_add(u).map(Self::from_usize) - } - - #[inline] - fn sub_usize(&self, u: usize) -> Option { - Self::index(*self).checked_sub(u).map(Self::from_usize) + fn backward_checked(start: Self, u: usize) -> Option { + Self::index(start).checked_sub(u).map(Self::from_usize) } } diff --git a/src/librustc_lexer/src/lib.rs b/src/librustc_lexer/src/lib.rs index 5ccfc1b276bfa..e44feee96607a 100644 --- a/src/librustc_lexer/src/lib.rs +++ b/src/librustc_lexer/src/lib.rs @@ -1,5 +1,11 @@ //! Low-level Rust lexer. //! +//! The idea with `librustc_lexer` is to make a reusable library, +//! by separating out pure lexing and rustc-specific concerns, like spans, +//! error reporting an interning. So, rustc_lexer operates directly on `&str`, +//! produces simple tokens which are a pair of type-tag and a bit of original text, +//! and does not report errors, instead storing them as flags on the token. +//! //! Tokens produced by this lexer are not yet ready for parsing the Rust syntax, //! for that see `librustc_parse::lexer`, which converts this basic token stream //! into wide tokens used by actual parser. @@ -719,6 +725,9 @@ impl Cursor<'_> { // Check that amount of closing '#' symbols // is equal to the amount of opening ones. + // Note that this will not consume extra trailing `#` characters: + // `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }` + // followed by a `#` token. 
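The rule in the comment above (at most `n_start_hashes` closing `#` characters belong to the raw string; any extras are left for the next token) can be sketched as a standalone helper. This is an illustration only, not `rustc_lexer`'s implementation, and `closing_hashes_taken` is a made-up name.

```rust
// Illustration only: after the closing quote of a raw string that opened with
// `n_start_hashes` hashes, count how many of the trailing '#' characters are
// taken by the literal itself.
fn closing_hashes_taken(n_start_hashes: usize, rest: &str) -> usize {
    rest.chars().take_while(|&c| c == '#').take(n_start_hashes).count()
}

fn main() {
    // r###"abcde"####  -> three hashes close the literal, one `#` is left over.
    assert_eq!(closing_hashes_taken(3, "####"), 3);
}
```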
let mut hashes_left = n_start_hashes; let is_closing_hash = |c| { if c == '#' && hashes_left != 0 { @@ -739,8 +748,8 @@ impl Cursor<'_> { possible_terminator_offset: None, }; } else if n_end_hashes > max_hashes { - // Keep track of possible terminators to give a hint about where there might be - // a missing terminator + // Keep track of possible terminators to give a hint about + // where there might be a missing terminator possible_terminator_offset = Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len); max_hashes = n_end_hashes; diff --git a/src/librustc_lexer/src/unescape.rs b/src/librustc_lexer/src/unescape.rs index c51bf735fa727..2a9e1b7cbc346 100644 --- a/src/librustc_lexer/src/unescape.rs +++ b/src/librustc_lexer/src/unescape.rs @@ -58,69 +58,57 @@ pub enum EscapeError { NonAsciiCharInByteString, } -/// Takes a contents of a char literal (without quotes), and returns an -/// unescaped char or an error -pub fn unescape_char(literal_text: &str) -> Result { - let mut chars = literal_text.chars(); - unescape_char_or_byte(&mut chars, Mode::Char) - .map_err(|err| (literal_text.len() - chars.as_str().len(), err)) -} - -/// Takes a contents of a byte literal (without quotes), and returns an -/// unescaped byte or an error. -pub fn unescape_byte(literal_text: &str) -> Result { - let mut chars = literal_text.chars(); - unescape_char_or_byte(&mut chars, Mode::Byte) - .map(byte_from_char) - .map_err(|err| (literal_text.len() - chars.as_str().len(), err)) -} - -/// Takes a contents of a string literal (without quotes) and produces a +/// Takes a contents of a literal (without quotes) and produces a /// sequence of escaped characters or errors. /// Values are returned through invoking of the provided callback. -pub fn unescape_str(literal_text: &str, callback: &mut F) +pub fn unescape_literal(literal_text: &str, mode: Mode, callback: &mut F) where F: FnMut(Range, Result), { - unescape_str_or_byte_str(literal_text, Mode::Str, callback) + match mode { + Mode::Char | Mode::Byte => { + let mut chars = literal_text.chars(); + let result = unescape_char_or_byte(&mut chars, mode); + // The Chars iterator moved forward. + callback(0..(literal_text.len() - chars.as_str().len()), result); + } + Mode::Str | Mode::ByteStr => unescape_str_or_byte_str(literal_text, mode, callback), + // NOTE: Raw strings do not perform any explicit character escaping, here we + // only translate CRLF to LF and produce errors on bare CR. + Mode::RawStr | Mode::RawByteStr => { + unescape_raw_str_or_byte_str(literal_text, mode, callback) + } + } } -/// Takes a contents of a byte string literal (without quotes) and produces a -/// sequence of bytes or errors. +/// Takes a contents of a byte, byte string or raw byte string (without quotes) +/// and produces a sequence of bytes or errors. /// Values are returned through invoking of the provided callback. -pub fn unescape_byte_str(literal_text: &str, callback: &mut F) +pub fn unescape_byte_literal(literal_text: &str, mode: Mode, callback: &mut F) where F: FnMut(Range, Result), { - unescape_str_or_byte_str(literal_text, Mode::ByteStr, &mut |range, char| { - callback(range, char.map(byte_from_char)) + assert!(mode.is_bytes()); + unescape_literal(literal_text, mode, &mut |range, result| { + callback(range, result.map(byte_from_char)); }) } -/// Takes a contents of a raw string literal (without quotes) and produces a -/// sequence of characters or errors. -/// Values are returned through invoking of the provided callback. 
-/// NOTE: Raw strings do not perform any explicit character escaping, here we -/// only translate CRLF to LF and produce errors on bare CR. -pub fn unescape_raw_str(literal_text: &str, callback: &mut F) -where - F: FnMut(Range, Result), -{ - unescape_raw_str_or_byte_str(literal_text, Mode::Str, callback) +/// Takes a contents of a char literal (without quotes), and returns an +/// unescaped char or an error +pub fn unescape_char(literal_text: &str) -> Result { + let mut chars = literal_text.chars(); + unescape_char_or_byte(&mut chars, Mode::Char) + .map_err(|err| (literal_text.len() - chars.as_str().len(), err)) } -/// Takes a contents of a raw byte string literal (without quotes) and produces a -/// sequence of bytes or errors. -/// Values are returned through invoking of the provided callback. -/// NOTE: Raw strings do not perform any explicit character escaping, here we -/// only translate CRLF to LF and produce errors on bare CR. -pub fn unescape_raw_byte_str(literal_text: &str, callback: &mut F) -where - F: FnMut(Range, Result), -{ - unescape_raw_str_or_byte_str(literal_text, Mode::ByteStr, &mut |range, char| { - callback(range, char.map(byte_from_char)) - }) +/// Takes a contents of a byte literal (without quotes), and returns an +/// unescaped byte or an error. +pub fn unescape_byte(literal_text: &str) -> Result { + let mut chars = literal_text.chars(); + unescape_char_or_byte(&mut chars, Mode::Byte) + .map(byte_from_char) + .map_err(|err| (literal_text.len() - chars.as_str().len(), err)) } /// What kind of literal do we parse. @@ -130,13 +118,15 @@ pub enum Mode { Str, Byte, ByteStr, + RawStr, + RawByteStr, } impl Mode { pub fn in_single_quotes(self) -> bool { match self { Mode::Char | Mode::Byte => true, - Mode::Str | Mode::ByteStr => false, + Mode::Str | Mode::ByteStr | Mode::RawStr | Mode::RawByteStr => false, } } @@ -146,8 +136,8 @@ impl Mode { pub fn is_bytes(self) -> bool { match self { - Mode::Byte | Mode::ByteStr => true, - Mode::Char | Mode::Str => false, + Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true, + Mode::Char | Mode::Str | Mode::RawStr => false, } } } @@ -345,7 +335,7 @@ where fn byte_from_char(c: char) -> u8 { let res = c as u32; - assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::Byte(Str)"); + assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::ByteStr"); res as u8 } diff --git a/src/librustc_lexer/src/unescape/tests.rs b/src/librustc_lexer/src/unescape/tests.rs index e7b1ff6479d88..f2b751a78f27f 100644 --- a/src/librustc_lexer/src/unescape/tests.rs +++ b/src/librustc_lexer/src/unescape/tests.rs @@ -102,7 +102,7 @@ fn test_unescape_char_good() { fn test_unescape_str_good() { fn check(literal_text: &str, expected: &str) { let mut buf = Ok(String::with_capacity(literal_text.len())); - unescape_str(literal_text, &mut |range, c| { + unescape_literal(literal_text, Mode::Str, &mut |range, c| { if let Ok(b) = &mut buf { match c { Ok(c) => b.push(c), @@ -222,7 +222,7 @@ fn test_unescape_byte_good() { fn test_unescape_byte_str_good() { fn check(literal_text: &str, expected: &[u8]) { let mut buf = Ok(Vec::with_capacity(literal_text.len())); - unescape_byte_str(literal_text, &mut |range, c| { + unescape_byte_literal(literal_text, Mode::ByteStr, &mut |range, c| { if let Ok(b) = &mut buf { match c { Ok(c) => b.push(c), @@ -246,7 +246,7 @@ fn test_unescape_byte_str_good() { fn test_unescape_raw_str() { fn check(literal: &str, expected: &[(Range, Result)]) { let mut unescaped = Vec::with_capacity(literal.len()); - 
unescape_raw_str(literal, &mut |range, res| unescaped.push((range, res))); + unescape_literal(literal, Mode::RawStr, &mut |range, res| unescaped.push((range, res))); assert_eq!(unescaped, expected); } @@ -258,7 +258,9 @@ fn test_unescape_raw_str() { fn test_unescape_raw_byte_str() { fn check(literal: &str, expected: &[(Range, Result)]) { let mut unescaped = Vec::with_capacity(literal.len()); - unescape_raw_byte_str(literal, &mut |range, res| unescaped.push((range, res))); + unescape_byte_literal(literal, Mode::RawByteStr, &mut |range, res| { + unescaped.push((range, res)) + }); assert_eq!(unescaped, expected); } diff --git a/src/librustc_parse/lexer/mod.rs b/src/librustc_parse/lexer/mod.rs index f676a34a1d12b..1d67d6de4daa5 100644 --- a/src/librustc_parse/lexer/mod.rs +++ b/src/librustc_parse/lexer/mod.rs @@ -15,6 +15,7 @@ mod tokentrees; mod unescape_error_reporting; mod unicode_chars; +use rustc_lexer::unescape::Mode; use unescape_error_reporting::{emit_unescape_error, push_escaped_char}; #[derive(Clone, Debug)] @@ -326,38 +327,27 @@ impl<'a> StringReader<'a> { suffix_start: BytePos, kind: rustc_lexer::LiteralKind, ) -> (token::LitKind, Symbol) { - match kind { + // prefix means `"` or `br"` or `r###"`, ... + let (lit_kind, mode, prefix_len, postfix_len) = match kind { rustc_lexer::LiteralKind::Char { terminated } => { if !terminated { self.fatal_span_(start, suffix_start, "unterminated character literal").raise() } - let content_start = start + BytePos(1); - let content_end = suffix_start - BytePos(1); - self.validate_char_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::Char, id) + (token::Char, Mode::Char, 1, 1) // ' ' } rustc_lexer::LiteralKind::Byte { terminated } => { if !terminated { self.fatal_span_(start + BytePos(1), suffix_start, "unterminated byte constant") .raise() } - let content_start = start + BytePos(2); - let content_end = suffix_start - BytePos(1); - self.validate_byte_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::Byte, id) + (token::Byte, Mode::Byte, 2, 1) // b' ' } rustc_lexer::LiteralKind::Str { terminated } => { if !terminated { self.fatal_span_(start, suffix_start, "unterminated double quote string") .raise() } - let content_start = start + BytePos(1); - let content_end = suffix_start - BytePos(1); - self.validate_str_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::Str, id) + (token::Str, Mode::Str, 1, 1) // " " } rustc_lexer::LiteralKind::ByteStr { terminated } => { if !terminated { @@ -368,42 +358,28 @@ impl<'a> StringReader<'a> { ) .raise() } - let content_start = start + BytePos(2); - let content_end = suffix_start - BytePos(1); - self.validate_byte_str_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::ByteStr, id) + (token::ByteStr, Mode::ByteStr, 2, 1) // b" " } rustc_lexer::LiteralKind::RawStr(unvalidated_raw_str) => { let valid_raw_str = self.validate_and_report_errors(start, unvalidated_raw_str); let n_hashes = valid_raw_str.num_hashes(); let n = u32::from(n_hashes); - - let content_start = start + BytePos(2 + n); - let content_end = suffix_start - BytePos(1 + n); - self.validate_raw_str_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::StrRaw(n_hashes), id) + (token::StrRaw(n_hashes), Mode::RawStr, 2 + n, 1 + n) // r##" "## } 
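For illustration, the `prefix_len`/`postfix_len` pairs chosen in the match above map the full literal text to its content span as sketched below. `content_of` is a made-up helper working on string slices, not the lexer's byte-position arithmetic.

```rust
// Illustration of how prefix/postfix lengths carve out the literal content.
fn content_of(literal: &str, prefix_len: usize, postfix_len: usize) -> &str {
    &literal[prefix_len..literal.len() - postfix_len]
}

fn main() {
    assert_eq!(content_of("'a'", 1, 1), "a");                    // Char:    ' '
    assert_eq!(content_of("b'x'", 2, 1), "x");                   // Byte:    b' '
    assert_eq!(content_of("\"hi\"", 1, 1), "hi");                // Str:     " "
    assert_eq!(content_of("r##\"raw\"##", 2 + 2, 1 + 2), "raw"); // RawStr with n_hashes = 2
}
```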
rustc_lexer::LiteralKind::RawByteStr(unvalidated_raw_str) => { let validated_raw_str = self.validate_and_report_errors(start, unvalidated_raw_str); let n_hashes = validated_raw_str.num_hashes(); let n = u32::from(n_hashes); - - let content_start = start + BytePos(3 + n); - let content_end = suffix_start - BytePos(1 + n); - self.validate_raw_byte_str_escape(content_start, content_end); - let id = self.symbol_from_to(content_start, content_end); - (token::ByteStrRaw(n_hashes), id) + (token::ByteStrRaw(n_hashes), Mode::RawByteStr, 3 + n, 1 + n) // br##" "## } rustc_lexer::LiteralKind::Int { base, empty_int } => { - if empty_int { + return if empty_int { self.err_span_(start, suffix_start, "no valid digits found for number"); (token::Integer, sym::integer(0)) } else { self.validate_int_literal(base, start, suffix_start); (token::Integer, self.symbol_from_to(start, suffix_start)) - } + }; } rustc_lexer::LiteralKind::Float { base, empty_exponent } => { if empty_exponent { @@ -431,9 +407,14 @@ impl<'a> StringReader<'a> { } let id = self.symbol_from_to(start, suffix_start); - (token::Float, id) + return (token::Float, id); } - } + }; + let content_start = start + BytePos(prefix_len); + let content_end = suffix_start - BytePos(postfix_len); + let id = self.symbol_from_to(content_start, content_end); + self.validate_literal_escape(mode, content_start, content_end); + return (lit_kind, id); } #[inline] @@ -555,96 +536,23 @@ impl<'a> StringReader<'a> { .raise(); } - fn validate_char_escape(&self, content_start: BytePos, content_end: BytePos) { - let lit = self.str_from_to(content_start, content_end); - if let Err((off, err)) = unescape::unescape_char(lit) { - emit_unescape_error( - &self.sess.span_diagnostic, - lit, - self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)), - unescape::Mode::Char, - 0..off, - err, - ) - } - } - - fn validate_byte_escape(&self, content_start: BytePos, content_end: BytePos) { - let lit = self.str_from_to(content_start, content_end); - if let Err((off, err)) = unescape::unescape_byte(lit) { - emit_unescape_error( - &self.sess.span_diagnostic, - lit, - self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)), - unescape::Mode::Byte, - 0..off, - err, - ) - } - } - - fn validate_str_escape(&self, content_start: BytePos, content_end: BytePos) { - let lit = self.str_from_to(content_start, content_end); - unescape::unescape_str(lit, &mut |range, c| { - if let Err(err) = c { + fn validate_literal_escape(&self, mode: Mode, content_start: BytePos, content_end: BytePos) { + let lit_content = self.str_from_to(content_start, content_end); + unescape::unescape_literal(lit_content, mode, &mut |range, result| { + // Here we only check for errors. The actual unescaping is done later. 
+            if let Err(err) = result {
+                let span_with_quotes =
+                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1));
                 emit_unescape_error(
                     &self.sess.span_diagnostic,
-                    lit,
-                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
-                    unescape::Mode::Str,
+                    lit_content,
+                    span_with_quotes,
+                    mode,
                     range,
                     err,
-                )
-            }
-        })
-    }
-
-    fn validate_raw_str_escape(&self, content_start: BytePos, content_end: BytePos) {
-        let lit = self.str_from_to(content_start, content_end);
-        unescape::unescape_raw_str(lit, &mut |range, c| {
-            if let Err(err) = c {
-                emit_unescape_error(
-                    &self.sess.span_diagnostic,
-                    lit,
-                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
-                    unescape::Mode::Str,
-                    range,
-                    err,
-                )
-            }
-        })
-    }
-
-    fn validate_raw_byte_str_escape(&self, content_start: BytePos, content_end: BytePos) {
-        let lit = self.str_from_to(content_start, content_end);
-        unescape::unescape_raw_byte_str(lit, &mut |range, c| {
-            if let Err(err) = c {
-                emit_unescape_error(
-                    &self.sess.span_diagnostic,
-                    lit,
-                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
-                    unescape::Mode::ByteStr,
-                    range,
-                    err,
-                )
-            }
-        })
-    }
-
-    fn validate_byte_str_escape(&self, content_start: BytePos, content_end: BytePos) {
-        let lit = self.str_from_to(content_start, content_end);
-        unescape::unescape_byte_str(lit, &mut |range, c| {
-            if let Err(err) = c {
-                emit_unescape_error(
-                    &self.sess.span_diagnostic,
-                    lit,
-                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
-                    unescape::Mode::ByteStr,
-                    range,
-                    err,
+            );
             }
-        })
+        });
     }
 
     fn validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos) {
diff --git a/src/librustc_session/options.rs b/src/librustc_session/options.rs
index be1c358d58e0f..e2c82a397c738 100644
--- a/src/librustc_session/options.rs
+++ b/src/librustc_session/options.rs
@@ -271,6 +271,7 @@ macro_rules! options {
         "one of supported relocation models (`rustc --print relocation-models`)";
     pub const parse_tls_model: &str =
         "one of supported TLS models (`rustc --print tls-models`)";
+    pub const parse_target_feature: &str = parse_string;
 }
 
 #[allow(dead_code)]
@@ -647,6 +648,19 @@ macro_rules! options {
             }
             true
         }
+
+        fn parse_target_feature(slot: &mut String, v: Option<&str>) -> bool {
+            match v {
+                Some(s) => {
+                    if !slot.is_empty() {
+                        slot.push_str(",");
+                    }
+                    slot.push_str(s);
+                    true
+                }
+                None => false,
+            }
+        }
     }
 ) }
@@ -742,7 +756,7 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options,
         "use soft float ABI (*eabihf targets only) (default: no)"),
     target_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
         "select target processor (`rustc --print target-cpus` for details)"),
-    target_feature: String = (String::new(), parse_string, [TRACKED],
+    target_feature: String = (String::new(), parse_target_feature, [TRACKED],
         "target specific attributes. (`rustc --print target-features` for details). \
         This feature is unsafe."),
diff --git a/src/test/codegen/target-feature-multiple.rs b/src/test/codegen/target-feature-multiple.rs
new file mode 100644
index 0000000000000..f71a9c3c58216
--- /dev/null
+++ b/src/test/codegen/target-feature-multiple.rs
@@ -0,0 +1,9 @@
+// only-x86_64
+// compile-flags: -C target-feature=+sse2,-avx,+avx2 -C target-feature=+avx,-avx2
+
+#![crate_type = "lib"]
+
+#[no_mangle]
+pub fn foo() {
+    // CHECK: attributes #0 = { {{.*}}"target-features"="+sse2,-avx,+avx2,+avx,-avx2"{{.*}} }
+}
diff --git a/src/test/ui/impl-trait/example-calendar.rs b/src/test/ui/impl-trait/example-calendar.rs
index f1b1656745e7c..fafab8a102a90 100644
--- a/src/test/ui/impl-trait/example-calendar.rs
+++ b/src/test/ui/impl-trait/example-calendar.rs
@@ -2,6 +2,7 @@
 #![feature(fn_traits,
            step_trait,
+           step_trait_ext,
            unboxed_closures,
 )]
 
@@ -10,7 +11,6 @@
 //! Originally converted to Rust by [Daniel Keep](https://github.com/DanielKeep).
 
 use std::fmt::Write;
-use std::mem;
 
 /// Date representation.
 #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
@@ -156,32 +156,16 @@ impl<'a, 'b> std::ops::Add<&'b NaiveDate> for &'a NaiveDate {
     }
 }
 
-impl std::iter::Step for NaiveDate {
+unsafe impl std::iter::Step for NaiveDate {
     fn steps_between(_: &Self, _: &Self) -> Option<usize> {
         unimplemented!()
     }
 
-    fn replace_one(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 1))
+    fn forward_checked(start: Self, n: usize) -> Option<Self> {
+        Some((0..n).fold(start, |x, _| x.succ()))
     }
 
-    fn replace_zero(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 0))
-    }
-
-    fn add_one(&self) -> Self {
-        self.succ()
-    }
-
-    fn sub_one(&self) -> Self {
-        unimplemented!()
-    }
-
-    fn add_usize(&self, _: usize) -> Option<Self> {
-        unimplemented!()
-    }
-
-    fn sub_usize(&self, _: usize) -> Option<Self> {
+    fn backward_checked(_: Self, _: usize) -> Option<Self> {
         unimplemented!()
     }
 }
diff --git a/src/tools/clippy/clippy_lints/src/write.rs b/src/tools/clippy/clippy_lints/src/write.rs
index 5ad43ad55a36a..26bf463bd2922 100644
--- a/src/tools/clippy/clippy_lints/src/write.rs
+++ b/src/tools/clippy/clippy_lints/src/write.rs
@@ -483,8 +483,8 @@ fn check_newlines(fmtstr: &StrLit) -> bool {
     };
 
     match fmtstr.style {
-        StrStyle::Cooked => unescape::unescape_str(contents, &mut cb),
-        StrStyle::Raw(_) => unescape::unescape_raw_str(contents, &mut cb),
+        StrStyle::Cooked => unescape::unescape_literal(contents, unescape::Mode::Str, &mut cb),
+        StrStyle::Raw(_) => unescape::unescape_literal(contents, unescape::Mode::RawStr, &mut cb),
     }
 
     should_lint
diff --git a/src/tools/clippy/src/driver.rs b/src/tools/clippy/src/driver.rs
index 2c699998ea90e..1ce0300f23904 100644
--- a/src/tools/clippy/src/driver.rs
+++ b/src/tools/clippy/src/driver.rs
@@ -296,7 +296,7 @@ pub fn main() {
     rustc_driver::init_rustc_env_logger();
     lazy_static::initialize(&ICE_HOOK);
     exit(
-        rustc_driver::catch_fatal_errors(move || {
+        rustc_driver::catch_with_exit_code(move || {
             let mut orig_args: Vec<String> = env::args().collect();
 
             if orig_args.iter().any(|a| a == "--version" || a == "-V") {
@@ -411,7 +411,5 @@ pub fn main() {
             if clippy_enabled { &mut clippy } else { &mut default };
             rustc_driver::run_compiler(&args, callbacks, None, None)
         })
-        .and_then(|result| result)
-        .is_err() as i32,
     )
 }
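The codegen test above relies on `parse_target_feature` concatenating every `-C target-feature` occurrence into one comma-separated list, with later entries taking precedence for a feature that appears more than once (the precedence itself is resolved in the backend, not in this parsing code). The following is a toy model of that resolution order, not rustc's implementation; `resolve_features` is a made-up helper.

```rust
use std::collections::BTreeMap;

// Toy model: resolve a combined feature string such as
// "+sse2,-avx,+avx2,+avx,-avx2", letting later entries win.
fn resolve_features(combined: &str) -> BTreeMap<&str, bool> {
    let mut enabled = BTreeMap::new();
    for item in combined.split(',').filter(|item| item.len() > 1) {
        // Split the leading '+'/'-' sign from the feature name.
        let (sign, name) = item.split_at(1);
        enabled.insert(name, sign == "+");
    }
    enabled
}

fn main() {
    // Same feature string the codegen test expects in the LLVM attributes.
    let resolved = resolve_features("+sse2,-avx,+avx2,+avx,-avx2");
    assert_eq!(resolved.get("sse2"), Some(&true));
    assert_eq!(resolved.get("avx"), Some(&true)); // the later `+avx` wins
    assert_eq!(resolved.get("avx2"), Some(&false)); // the later `-avx2` wins
}
```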
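The `example-calendar.rs` change above shows the redesigned `Step` surface in use: the required methods are `steps_between`, `forward_checked`, and `backward_checked`, behind the `step_trait`/`step_trait_ext` gates. Below is a self-contained nightly sketch with a made-up `Id` newtype; it is not taken from the patch, and the gate set simply mirrors the calendar test.

```rust
#![feature(step_trait, step_trait_ext)] // nightly-only, same gates as the calendar test

use std::convert::TryFrom;
use std::iter::Step;

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Id(u32);

// `forward_checked` applies the successor `n` times (None on overflow),
// `backward_checked` the predecessor, and `steps_between` reports the distance.
unsafe impl Step for Id {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        end.0.checked_sub(start.0).map(|d| d as usize)
    }

    fn forward_checked(start: Self, n: usize) -> Option<Self> {
        u32::try_from(n).ok().and_then(|n| start.0.checked_add(n)).map(Id)
    }

    fn backward_checked(start: Self, n: usize) -> Option<Self> {
        u32::try_from(n).ok().and_then(|n| start.0.checked_sub(n)).map(Id)
    }
}

fn main() {
    // With `Step` implemented, plain ranges of `Id` iterate.
    assert_eq!((Id(1)..Id(4)).collect::<Vec<_>>(), vec![Id(1), Id(2), Id(3)]);
    // Overflow is reported as None rather than wrapping.
    assert_eq!(Step::forward_checked(Id(u32::max_value()), 1), None);
}
```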