diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index f148b776852aa..64d8376d27ca5 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -945,6 +945,13 @@ impl Step for Size { u64::forward_checked(start.bytes(), count).map(Self::from_bytes) } + #[inline] + #[cfg(not(bootstrap))] + fn forward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u64::forward_overflowing(start.bytes(), count); + (Self::from_bytes(s), o) + } + #[inline] fn forward(start: Self, count: usize) -> Self { Self::from_bytes(u64::forward(start.bytes(), count)) @@ -960,6 +967,13 @@ impl Step for Size { u64::backward_checked(start.bytes(), count).map(Self::from_bytes) } + #[inline] + #[cfg(not(bootstrap))] + fn backward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u64::backward_overflowing(start.bytes(), count); + (Self::from_bytes(s), o) + } + #[inline] fn backward(start: Self, count: usize) -> Self { Self::from_bytes(u64::backward(start.bytes(), count)) diff --git a/compiler/rustc_index_macros/src/newtype.rs b/compiler/rustc_index_macros/src/newtype.rs index b6ee283e736c4..ed860d6d60cbd 100644 --- a/compiler/rustc_index_macros/src/newtype.rs +++ b/compiler/rustc_index_macros/src/newtype.rs @@ -146,6 +146,20 @@ impl Parse for Newtype { fn backward_checked(start: Self, u: usize) -> Option { Self::index(start).checked_sub(u).map(Self::from_usize) } + + #[inline] + #[cfg(not(bootstrap))] + fn forward_overflowing(start: Self, u: usize) -> (Self, bool) { + let (s, o) = Self::index(start).overflowing_add(u); + (Self::from_usize(s), o) + } + + #[inline] + #[cfg(not(bootstrap))] + fn backward_overflowing(start: Self, u: usize) -> (Self, bool) { + let (s, o) = Self::index(start).overflowing_sub(u); + (Self::from_usize(s), o) + } } impl ::std::cmp::Ord for #name { fn cmp(&self, other: &Self) -> std::cmp::Ordering { diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index 
3b025fdcda904..50589e0395472 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -66,6 +66,31 @@ pub const trait Step: [const] Clone + [const] PartialOrd + Sized { /// * Corollary: `Step::forward_checked(a, 0) == Some(a)` fn forward_checked(start: Self, count: usize) -> Option; + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times along with a boolean tracking whether overflow + /// occurred. + /// + /// If this would overflow the range of values supported by `Self`, the + /// value returned is unspecified and should not be relied on, though + /// typically wrapping (modular arithmetic) is the most effective + /// implementation to enable optimizations. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::forward_overflowing(Step::forward_overflowing(a, n).0, m) == Step::forward_overflowing(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::forward_overflowing(a, n) == (Step::forward_checked(a, n).unwrap(), false)` + /// + /// For any `a` and `n`: + /// + /// * `Step::forward_overflowing(a, n) == (0..n).fold((a, false), |(x, y), _| { let (s, o) = Step::forward_overflowing(x, 1); (s, y || o) })` + /// * Corollary: `Step::forward_overflowing(a, 0) == (a, false)` + fn forward_overflowing(start: Self, count: usize) -> (Self, bool); + /// Returns the value that would be obtained by taking the *successor* /// of `self` `count` times. /// @@ -136,6 +161,31 @@ pub const trait Step: [const] Clone + [const] PartialOrd + Sized { /// * Corollary: `Step::backward_checked(a, 0) == Some(a)` fn backward_checked(start: Self, count: usize) -> Option; + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times along with a boolean tracking whether overflow + /// occurred. 
+ /// + /// If this would overflow the range of values supported by `Self`, the + /// value returned is unspecified and should not be relied on, though + /// typically wrapping (modular arithmetic) is the most effective + /// implementation to enable optimizations. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::backward_overflowing(Step::backward_overflowing(a, n).0, m) == Step::backward_overflowing(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::backward_overflowing(a, n) == (Step::backward_checked(a, n).unwrap(), false)` + /// + /// For any `a` and `n`: + /// + /// * `Step::backward_overflowing(a, n) == (0..n).fold((a, false), |(x, y), _| { let (s, o) = Step::backward_overflowing(x, 1); (s, y || o) })` + /// * Corollary: `Step::backward_overflowing(a, 0) == (a, false)` + fn backward_overflowing(start: Self, count: usize) -> (Self, bool); + /// Returns the value that would be obtained by taking the *predecessor* /// of `self` `count` times. /// @@ -293,6 +343,24 @@ macro_rules! step_integer_impls { Err(_) => None, // if n is out of range, `unsigned_start - n` is too } } + + #[inline] + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + match Self::try_from(n) { + Ok(n) => start.overflowing_add(n), + // if n is out of range, `start + n` must overflow + Err(_) => (start.wrapping_add(n as Self), true), + } + } + + #[inline] + fn backward_overflowing(start: Self, n: usize) -> (Self, bool) { + match Self::try_from(n) { + Ok(n) => start.overflowing_sub(n), + // if n is out of range, `start - n` must overflow + Err(_) => (start.wrapping_sub(n as Self), true), + } + } } #[allow(unreachable_patterns)] @@ -358,6 +426,28 @@ macro_rules! 
step_integer_impls { Err(_) => None, } } + + #[inline] + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + match $u_narrower::try_from(n) { + Ok(n) => start.overflowing_add_unsigned(n), + // If n is out of range of e.g. u8, + // then it is bigger than the entire range for i8 is wide + // so `any_i8 + n` necessarily overflows i8. + Err(_) => (start.wrapping_add(n as Self), true), + } + } + + #[inline] + fn backward_overflowing(start: Self, n: usize) -> (Self, bool) { + match $u_narrower::try_from(n) { + Ok(n) => start.overflowing_sub_unsigned(n), + // If n is out of range of e.g. u8, + // then it is bigger than the entire range for i8 is wide + // so `any_i8 - n` necessarily overflows i8. + Err(_) => (start.wrapping_sub(n as Self), true), + } + } } )+ @@ -391,6 +481,16 @@ macro_rules! step_integer_impls { fn backward_checked(start: Self, n: usize) -> Option { start.checked_sub(n as Self) } + + #[inline] + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + start.overflowing_add(n as Self) + } + + #[inline] + fn backward_overflowing(start: Self, n: usize) -> (Self, bool) { + start.overflowing_sub(n as Self) + } } #[allow(unreachable_patterns)] @@ -429,6 +529,16 @@ macro_rules! step_integer_impls { fn backward_checked(start: Self, n: usize) -> Option { start.checked_sub(n as Self) } + + #[inline] + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + start.overflowing_add_unsigned(n as $u_wider) + } + + #[inline] + fn backward_overflowing(start: Self, n: usize) -> (Self, bool) { + start.overflowing_sub_unsigned(n as $u_wider) + } } )+ }; @@ -490,6 +600,31 @@ macro_rules! step_nonzero_identical_methods { Self::new(start.get().saturating_sub(n as $int)).unwrap_or(Self::MIN) } + // Note: These NonZero overflowing implementations were chosen for + // code simplicity. 
Many alternative impls were examined, and some + // yielded marginally simpler assembly, but none resulted in the same + // loop -> arithmetic optimizations seen with the bare integers. + + #[inline] + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + // Wrapping to Zero causes UB, so saturate to MAX instead. + if let Some(s) = Step::forward_checked(start, n) { + (s, false) + } else { + (Self::MAX, true) + } + } + + #[inline] + fn backward_overflowing(start: Self, n: usize) -> (Self, bool) { + // Subtracting to Zero causes UB, so saturate to MIN instead. + if let Some(s) = Step::backward_checked(start, n) { + (s, false) + } else { + (Self::MIN, true) + } + } + #[inline] fn steps_between(start: &Self, end: &Self) -> (usize, Option) { if *start <= *end { @@ -627,6 +762,29 @@ impl const Step for char { Some(unsafe { char::from_u32_unchecked(res) }) } + // Note: These char overflowing implementations were chosen for + // code simplicity. Alternative impls were examined, and some + // yielded marginally simpler assembly, but none resulted in the same + // loop -> arithmetic optimizations seen with the bare integers. 
+ + #[inline] + fn forward_overflowing(start: Self, count: usize) -> (Self, bool) { + if let Some(c) = Step::forward_checked(start, count) { + (c, false) + } else { + (Self::MAX, true) + } + } + + #[inline] + fn backward_overflowing(start: Self, count: usize) -> (Self, bool) { + if let Some(c) = Step::backward_checked(start, count) { + (c, false) + } else { + (Self::MIN, true) + } + } + #[inline] unsafe fn forward_unchecked(start: char, count: usize) -> char { let start = start as u32; @@ -682,6 +840,24 @@ impl const Step for AsciiChar { Some(unsafe { AsciiChar::from_u8_unchecked(end) }) } + #[inline] + fn forward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = (start as usize).overflowing_add(count); + let ret = s & (AsciiChar::MAX as usize); + + // SAFETY: Clamped to [0, MAX], must be valid ASCII + (unsafe { AsciiChar::from_u8_unchecked(ret as u8) }, o || ret < s) + } + + #[inline] + fn backward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = (start as usize).overflowing_sub(count); + let ret = s & (AsciiChar::MAX as usize); + + // SAFETY: Clamped to [0, MAX], must be valid ASCII + (unsafe { AsciiChar::from_u8_unchecked(ret as u8) }, o || ret < s) + } + #[inline] unsafe fn forward_unchecked(start: AsciiChar, count: usize) -> AsciiChar { // SAFETY: Caller asserts that result is a valid ASCII character, @@ -721,6 +897,18 @@ impl const Step for Ipv4Addr { u32::backward_checked(start.to_bits(), count).map(Ipv4Addr::from_bits) } + #[inline] + fn forward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u32::forward_overflowing(start.to_bits(), count); + (Ipv4Addr::from_bits(s), o) + } + + #[inline] + fn backward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u32::backward_overflowing(start.to_bits(), count); + (Ipv4Addr::from_bits(s), o) + } + #[inline] unsafe fn forward_unchecked(start: Ipv4Addr, count: usize) -> Ipv4Addr { // SAFETY: Since u32 and Ipv4Addr are losslessly 
convertible, @@ -754,6 +942,18 @@ impl const Step for Ipv6Addr { u128::backward_checked(start.to_bits(), count).map(Ipv6Addr::from_bits) } + #[inline] + fn forward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u128::forward_overflowing(start.to_bits(), count); + (Ipv6Addr::from_bits(s), o) + } + + #[inline] + fn backward_overflowing(start: Self, count: usize) -> (Self, bool) { + let (s, o) = u128::backward_overflowing(start.to_bits(), count); + (Ipv6Addr::from_bits(s), o) + } + #[inline] unsafe fn forward_unchecked(start: Ipv6Addr, count: usize) -> Ipv6Addr { // SAFETY: Since u128 and Ipv6Addr are losslessly convertible, @@ -1186,7 +1386,6 @@ trait RangeInclusiveIteratorImpl { type Item; // Iterator - fn spec_next(&mut self) -> Option; fn spec_try_fold(&mut self, init: B, f: F) -> R where Self: Sized, @@ -1194,7 +1393,6 @@ trait RangeInclusiveIteratorImpl { R: Try; // DoubleEndedIterator - fn spec_next_back(&mut self) -> Option; fn spec_try_rfold(&mut self, init: B, f: F) -> R where Self: Sized, @@ -1205,22 +1403,6 @@ trait RangeInclusiveIteratorImpl { impl RangeInclusiveIteratorImpl for ops::RangeInclusive { type Item = A; - #[inline] - default fn spec_next(&mut self) -> Option { - if self.is_empty() { - return None; - } - let is_iterating = self.start < self.end; - Some(if is_iterating { - let n = - Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld"); - mem::replace(&mut self.start, n) - } else { - self.exhausted = true; - self.start.clone() - }) - } - #[inline] default fn spec_try_fold(&mut self, init: B, mut f: F) -> R where @@ -1250,22 +1432,6 @@ impl RangeInclusiveIteratorImpl for ops::RangeInclusive { try { accum } } - #[inline] - default fn spec_next_back(&mut self) -> Option { - if self.is_empty() { - return None; - } - let is_iterating = self.start < self.end; - Some(if is_iterating { - let n = - Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld"); - mem::replace(&mut 
self.end, n) - } else { - self.exhausted = true; - self.end.clone() - }) - } - #[inline] default fn spec_try_rfold(&mut self, init: B, mut f: F) -> R where @@ -1297,22 +1463,6 @@ impl RangeInclusiveIteratorImpl for ops::RangeInclusive { } impl RangeInclusiveIteratorImpl for ops::RangeInclusive { - #[inline] - fn spec_next(&mut self) -> Option { - if self.is_empty() { - return None; - } - let is_iterating = self.start < self.end; - Some(if is_iterating { - // SAFETY: just checked precondition - let n = unsafe { Step::forward_unchecked(self.start, 1) }; - mem::replace(&mut self.start, n) - } else { - self.exhausted = true; - self.start - }) - } - #[inline] fn spec_try_fold(&mut self, init: B, mut f: F) -> R where @@ -1342,22 +1492,6 @@ impl RangeInclusiveIteratorImpl for ops::RangeInclusive { try { accum } } - #[inline] - fn spec_next_back(&mut self) -> Option { - if self.is_empty() { - return None; - } - let is_iterating = self.start < self.end; - Some(if is_iterating { - // SAFETY: just checked precondition - let n = unsafe { Step::backward_unchecked(self.end, 1) }; - mem::replace(&mut self.end, n) - } else { - self.exhausted = true; - self.end - }) - } - #[inline] fn spec_try_rfold(&mut self, init: B, mut f: F) -> R where @@ -1394,7 +1528,14 @@ impl Iterator for ops::RangeInclusive { #[inline] fn next(&mut self) -> Option { - self.spec_next() + if self.is_empty() { + return None; + } + + let (n, o) = Step::forward_overflowing(self.start.clone(), 1); + + self.exhausted = o; + Some(mem::replace(&mut self.start, n)) } #[inline] @@ -1425,26 +1566,13 @@ impl Iterator for ops::RangeInclusive { return None; } - if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) { - use crate::cmp::Ordering::*; + let (plus_n, on) = Step::forward_overflowing(self.start.clone(), n); + let (plus_1, o1) = Step::forward_overflowing(plus_n.clone(), 1); - match plus_n.partial_cmp(&self.end) { - Some(Less) => { - self.start = Step::forward(plus_n.clone(), 1); - return 
Some(plus_n); - } - Some(Equal) => { - self.start = plus_n.clone(); - self.exhausted = true; - return Some(plus_n); - } - _ => {} - } - } + self.start = plus_1; + self.exhausted = on | o1; - self.start = self.end.clone(); - self.exhausted = true; - None + if !on && plus_n <= self.end { Some(plus_n) } else { None } } #[inline] @@ -1490,7 +1618,14 @@ impl Iterator for ops::RangeInclusive { impl DoubleEndedIterator for ops::RangeInclusive { #[inline] fn next_back(&mut self) -> Option { - self.spec_next_back() + if self.is_empty() { + return None; + } + + let (n, o) = Step::backward_overflowing(self.end.clone(), 1); + + self.exhausted = o; + Some(mem::replace(&mut self.end, n)) } #[inline] @@ -1499,26 +1634,13 @@ impl DoubleEndedIterator for ops::RangeInclusive { return None; } - if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) { - use crate::cmp::Ordering::*; + let (minus_n, on) = Step::backward_overflowing(self.end.clone(), n); + let (minus_1, o1) = Step::backward_overflowing(minus_n.clone(), 1); - match minus_n.partial_cmp(&self.start) { - Some(Greater) => { - self.end = Step::backward(minus_n.clone(), 1); - return Some(minus_n); - } - Some(Equal) => { - self.end = minus_n.clone(); - self.exhausted = true; - return Some(minus_n); - } - _ => {} - } - } + self.end = minus_1; + self.exhausted = on | o1; - self.end = self.start.clone(); - self.exhausted = true; - None + if !on && minus_n >= self.start { Some(minus_n) } else { None } } #[inline] diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs index c15c8f20c16be..4aec5b28a5124 100644 --- a/library/core/src/ops/range.rs +++ b/library/core/src/ops/range.rs @@ -363,10 +363,15 @@ pub struct RangeInclusive { pub(crate) start: Idx, pub(crate) end: Idx, - // This field is: + // This field represents an overflow flag for either bound (start or end): // - `false` upon construction - // - `false` when iteration has yielded an element and the iterator is not exhausted - // - `true` when 
iteration has been used to exhaust the iterator + // - `false` when iteration has yielded an element and + // neither bound has overflowed the valid range of `Idx` + // - `true` when iteration has caused either bound to + // overflow the valid range of `Idx` + // + // When this is true, `start` or `end` may be left in an unspecified state, + // often wrapping (modular arithmetic) around at the boundary of `Idx`. // // This is required to support PartialEq and Hash without a PartialOrd bound or specialization. pub(crate) exhausted: bool, @@ -464,6 +469,13 @@ impl RangeInclusive { /// The caller is responsible for dealing with `end == usize::MAX`. #[inline] pub(crate) const fn into_slice_range(self) -> Range { + // Typically users should not be indexing with exhausted instances, + // but this heuristic should apply to most cases. This doesn't + // handle reverse iteration well (`next_back` and `nth_back` can + // cause `end` to wrap around to values at or near `usize::MAX`), + // but using an exhausted `RangeInclusive` after reverse iteration + // is an exceedingly rare case. + // If we're not exhausted, we want to simply slice `start..end + 1`. // If we are exhausted, then slicing with `end + 1..end + 1` gives us an // empty range that is still subject to bounds-checks for that endpoint. @@ -1127,9 +1139,11 @@ impl const RangeBounds for RangeInclusive { } fn end_bound(&self) -> Bound<&T> { if self.exhausted { - // When the iterator is exhausted, we usually have start == end, + // When the iterator is exhausted, it might have overflowed, // but we want the range to appear empty, containing nothing. 
- Excluded(&self.end) + // So in that case, we return bounds which are always empty: + // Included(start)..Excluded(start) + Excluded(&self.start) } else { Included(&self.end) } @@ -1140,16 +1154,12 @@ impl const RangeBounds for RangeInclusive { #[rustc_const_unstable(feature = "const_range", issue = "none")] impl const IntoBounds for RangeInclusive { fn into_bounds(self) -> (Bound, Bound) { - ( - Included(self.start), - if self.exhausted { - // When the iterator is exhausted, we usually have start == end, - // but we want the range to appear empty, containing nothing. - Excluded(self.end) - } else { - Included(self.end) - }, - ) + assert!( + !self.exhausted, + "attempted to convert from an exhausted `RangeInclusive` (unspecified behavior)" + ); + + (Included(self.start), Included(self.end)) } } diff --git a/library/coretests/tests/iter/range.rs b/library/coretests/tests/iter/range.rs index 4a00e6f96bda8..c9904f08c4240 100644 --- a/library/coretests/tests/iter/range.rs +++ b/library/coretests/tests/iter/range.rs @@ -93,9 +93,26 @@ fn test_range_inclusive_exhaustion() { assert_eq!(r.next(), None); assert_eq!(r.next(), None); - assert_eq!(*r.start(), 10); + assert_eq!(*r.start(), 11); assert_eq!(*r.end(), 10); - assert_ne!(r, 10..=10); + assert_eq!(r.contains(&11), false); + assert_eq!(r.contains(&10), false); + assert_eq!(r, 11..=10); + assert_eq!(r, r); + + let mut r = 255..=255_u8; + assert_eq!(r.next(), Some(255)); + assert!(r.is_empty()); + assert_eq!(r.next(), None); + assert_eq!(r.next(), None); + + assert_eq!(*r.start(), 0); + assert_eq!(*r.end(), 255); + assert_eq!(r.contains(&0), false); + assert_eq!(r.contains(&255), false); + assert_ne!(r, 255..=255); + assert_ne!(r, 0..=255); + assert_eq!(r, r); let mut r = 10..=10; assert_eq!(r.next_back(), Some(10)); @@ -103,8 +120,23 @@ fn test_range_inclusive_exhaustion() { assert_eq!(r.next_back(), None); assert_eq!(*r.start(), 10); - assert_eq!(*r.end(), 10); - assert_ne!(r, 10..=10); + assert_eq!(*r.end(), 9); + 
assert_eq!(r.contains(&10), false); + assert_eq!(r.contains(&9), false); + assert_eq!(r, 10..=9); + + let mut r = 0..=0_u8; + assert_eq!(r.next_back(), Some(0)); + assert!(r.is_empty()); + assert_eq!(r.next_back(), None); + + assert_eq!(*r.start(), 0); + assert_eq!(*r.end(), 255); + assert_eq!(r.contains(&0), false); + assert_eq!(r.contains(&255), false); + assert_ne!(r, 0..=0); + assert_ne!(r, 0..=255); + assert_eq!(r, r); let mut r = 10..=12; assert_eq!(r.next(), Some(10)); @@ -221,9 +253,6 @@ fn test_range_inclusive_nth() { assert_eq!((10..=15).nth(5), Some(15)); assert_eq!((10..=15).nth(6), None); - let mut exhausted_via_next = 10_u8..=20; - while exhausted_via_next.next().is_some() {} - let mut r = 10_u8..=20; assert_eq!(r.nth(2), Some(12)); assert_eq!(r, 13..=20); @@ -233,7 +262,11 @@ fn test_range_inclusive_nth() { assert_eq!(ExactSizeIterator::is_empty(&r), false); assert_eq!(r.nth(10), None); assert_eq!(r.is_empty(), true); - assert_eq!(r, exhausted_via_next); + assert_eq!(*r.start(), 27); + assert_eq!(*r.end(), 20); + assert_eq!(r.contains(&27), false); + assert_eq!(r.contains(&20), false); + assert_eq!(r, r); assert_eq!(ExactSizeIterator::is_empty(&r), true); } @@ -245,9 +278,6 @@ fn test_range_inclusive_nth_back() { assert_eq!((10..=15).nth_back(6), None); assert_eq!((-120..=80_i8).nth_back(200), Some(-120)); - let mut exhausted_via_next_back = 10_u8..=20; - while exhausted_via_next_back.next_back().is_some() {} - let mut r = 10_u8..=20; assert_eq!(r.nth_back(2), Some(18)); assert_eq!(r, 10..=17); @@ -257,7 +287,11 @@ fn test_range_inclusive_nth_back() { assert_eq!(ExactSizeIterator::is_empty(&r), false); assert_eq!(r.nth_back(10), None); assert_eq!(r.is_empty(), true); - assert_eq!(r, exhausted_via_next_back); + assert_eq!(*r.start(), 10); + assert_eq!(*r.end(), 3); + assert_eq!(r.contains(&10), false); + assert_eq!(r.contains(&3), false); + assert_eq!(r, r); assert_eq!(ExactSizeIterator::is_empty(&r), true); } diff --git 
a/library/coretests/tests/ops.rs b/library/coretests/tests/ops.rs index 121718f2167e2..1cbc625959886 100644 --- a/library/coretests/tests/ops.rs +++ b/library/coretests/tests/ops.rs @@ -305,7 +305,12 @@ fn test_fmt() { let mut r = 1..=1; assert_eq!(format!("{:?}", r), "1..=1"); r.next().unwrap(); - assert_eq!(format!("{:?}", r), "1..=1 (exhausted)"); + assert_eq!(format!("{:?}", r), "2..=1"); + + let mut r = 255_u8..=255; + assert_eq!(format!("{:?}", r), "255..=255"); + r.next().unwrap(); + assert_eq!(format!("{:?}", r), "0..=255 (exhausted)"); assert_eq!(format!("{:?}", 1..1), "1..1"); assert_eq!(format!("{:?}", 1..), "1.."); diff --git a/tests/codegen-llvm/range-iter-loop-opts.rs b/tests/codegen-llvm/range-iter-loop-opts.rs new file mode 100644 index 0000000000000..c51c7ad42709a --- /dev/null +++ b/tests/codegen-llvm/range-iter-loop-opts.rs @@ -0,0 +1,86 @@ +// This test ensures that Range iterators are optimizable, to +// the point that some loops can be entirely optimized out. + +//@ compile-flags: -Copt-level=3 + +#![crate_type = "lib"] + +use std::num::NonZeroU8; +use std::ops::{Range, RangeInclusive}; + +// CHECK-LABEL: @rangeinclusive_noop_loop = unnamed_addr alias void (), ptr @range_noop_loop +// CHECK-LABEL: @rangeinclusive_nz_noop_loop = unnamed_addr alias void (), ptr @range_noop_loop + +// CHECK-LABEL: @range_noop_loop( +#[no_mangle] +pub unsafe fn range_noop_loop() { + // CHECK-NEXT: start: + // CHECK-NEXT: ret void + + // This loop should be optimized out entirely. + for _ in 0_u8..100 { + () + } +} + +// CHECK-LABEL: @range_count( +#[no_mangle] +pub unsafe fn range_count(s: u8, e: u8) -> usize { + // CHECK-NOT: br {{.*}} + // CHECK: ret i{{8|16|32|64}} + + // This loop should be optimized to arithmetic. + let mut count = 0; + for _ in s..e { + count += 1; + } + count +} + +// Deduplicated to alias of range_noop_loop, checked above +#[no_mangle] +pub unsafe fn rangeinclusive_noop_loop() { + // This loop should be optimized out entirely. 
+ for _ in 0_u8..=100 { + () + } +} + +// CHECK-LABEL: @rangeinclusive_count( +#[no_mangle] +pub unsafe fn rangeinclusive_count(s: u8, e: u8) -> usize { + // CHECK-NOT: br {{.*}} + // CHECK: ret i{{8|16|32|64}} + + // This loop should be optimized to arithmetic. + let mut count = 0; + for _ in s..=e { + count += 1; + } + count +} + +// Deduplicated to alias of range_noop_loop, checked above +#[no_mangle] +pub unsafe fn rangeinclusive_nz_noop_loop() { + // This loop should be optimized out entirely. + for _ in NonZeroU8::new(1).unwrap()..=NonZeroU8::new(100).unwrap() { + () + } +} + +// CHECK-LABEL: @rangeinclusive_nz_count( +#[no_mangle] +pub unsafe fn rangeinclusive_nz_count(s: NonZeroU8, e: NonZeroU8) -> usize { + // CHECK: br {{.*}} + // CHECK: ret i{{8|16|32|64}} + + // RangeInclusive cannot optimize the same way + // because Step::forward_overflowing on NonZero cannot + // be allowed to wrap to 0. + let mut count = 0; + for _ in s..=e { + count += 1; + } + count +} diff --git a/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-abort.mir index 3f000dcafb035..a0c9008fbb04a 100644 --- a/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-abort.mir @@ -19,8 +19,6 @@ fn inclusive_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { scope 2 { debug x => _9; } - scope 5 (inlined iter::range::>::next) { - } } scope 3 (inlined std::ops::RangeInclusive::::new) { } @@ -37,7 +35,7 @@ fn inclusive_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { bb1: { StorageLive(_7); _6 = &mut _5; - _7 = as iter::range::RangeInclusiveIteratorImpl>::spec_next(move _6) -> [return: bb2, unwind unreachable]; + _7 = as Iterator>::next(move _6) -> [return: bb2, unwind unreachable]; } bb2: { diff --git 
a/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-unwind.mir index 2353717362711..4b64069791995 100644 --- a/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/range_iter.inclusive_loop.PreCodegen.after.panic-unwind.mir @@ -19,8 +19,6 @@ fn inclusive_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { scope 2 { debug x => _9; } - scope 5 (inlined iter::range::>::next) { - } } scope 3 (inlined std::ops::RangeInclusive::::new) { } @@ -37,7 +35,7 @@ fn inclusive_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { bb1: { StorageLive(_7); _6 = &mut _5; - _7 = as iter::range::RangeInclusiveIteratorImpl>::spec_next(move _6) -> [return: bb2, unwind: bb8]; + _7 = as Iterator>::next(move _6) -> [return: bb2, unwind: bb8]; } bb2: { diff --git a/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-abort.mir index 13969e5d23858..45bb00312a1c7 100644 --- a/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-abort.mir @@ -3,11 +3,9 @@ fn range_inclusive_iter_next(_1: &mut std::ops::RangeInclusive) -> Option { debug it => _1; let mut _0: std::option::Option; - scope 1 (inlined iter::range::>::next) { - } bb0: { - _0 = as iter::range::RangeInclusiveIteratorImpl>::spec_next(move _1) -> [return: bb1, unwind unreachable]; + _0 = as Iterator>::next(move _1) -> [return: bb1, unwind unreachable]; } bb1: { diff --git a/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-unwind.mir index 98cd58284dfaf..39c1e784e7b6c 100644 
--- a/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/range_iter.range_inclusive_iter_next.PreCodegen.after.panic-unwind.mir @@ -3,11 +3,9 @@ fn range_inclusive_iter_next(_1: &mut std::ops::RangeInclusive) -> Option { debug it => _1; let mut _0: std::option::Option; - scope 1 (inlined iter::range::>::next) { - } bb0: { - _0 = as iter::range::RangeInclusiveIteratorImpl>::spec_next(move _1) -> [return: bb1, unwind continue]; + _0 = as Iterator>::next(move _1) -> [return: bb1, unwind continue]; } bb1: { diff --git a/tests/ui/impl-trait/example-calendar.rs b/tests/ui/impl-trait/example-calendar.rs index 972ee5f4b63b4..a385cd9def1ab 100644 --- a/tests/ui/impl-trait/example-calendar.rs +++ b/tests/ui/impl-trait/example-calendar.rs @@ -163,9 +163,17 @@ impl std::iter::Step for NaiveDate { Some((0..n).fold(start, |x, _| x.succ())) } + fn forward_overflowing(start: Self, n: usize) -> (Self, bool) { + (Self::forward_checked(start, n).unwrap(), false) + } + fn backward_checked(_: Self, _: usize) -> Option { unimplemented!() } + + fn backward_overflowing(_: Self, _: usize) -> (Self, bool) { + unimplemented!() + } } #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]