From ec16141200c3180eefe01e3e0445762dabc65eb0 Mon Sep 17 00:00:00 2001
From: Ben Kimock
Date: Fri, 21 Jun 2024 12:28:03 -0400
Subject: [PATCH] Monomorphize RawVec

---
 library/alloc/src/raw_vec.rs                  | 432 ++++++++++++------
 library/alloc/src/raw_vec/tests.rs            |  10 +-
 .../inline_shims.drop.Inline.panic-abort.diff |  24 +-
 ..._to_slice.PreCodegen.after.panic-abort.mir |  66 +--
 ...to_slice.PreCodegen.after.panic-unwind.mir |  66 +--
 5 files changed, 368 insertions(+), 230 deletions(-)

diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 1134c7f833e2b..3550a27c78887 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -1,8 +1,8 @@
 #![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
 
-use core::alloc::LayoutError;
 use core::cmp;
 use core::hint;
+use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ptr::{self, NonNull, Unique};
 
@@ -67,7 +67,12 @@ impl Cap {
 /// `Box<[T]>`, since `capacity()` won't yield the length.
 #[allow(missing_debug_implementations)]
 pub(crate) struct RawVec<T, A: Allocator = Global> {
-    ptr: Unique<T>,
+    inner: MonoRawVec<A>,
+    _phantom: PhantomData<T>,
+}
+
+struct MonoRawVec<A: Allocator> {
+    ptr: Unique<u8>,
     /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
     ///
     /// # Safety
     ///
@@ -77,6 +82,237 @@ pub(crate) struct RawVec {
     alloc: A,
 }
 
+#[inline]
+const fn max_size_for_align(align: usize) -> usize {
+    // (power-of-two implies align != 0.)
+
+    // Rounded up size is:
+    //   size_rounded_up = (size + align - 1) & !(align - 1);
+    //
+    // We know from above that align != 0. If adding (align - 1)
+    // does not overflow, then rounding up will be fine.
+    //
+    // Conversely, &-masking with !(align - 1) will subtract off
+    // only low-order-bits. Thus if overflow occurs with the sum,
+    // the &-mask cannot subtract enough to undo that overflow.
+    //
+    // Above implies that checking for summation overflow is both
+    // necessary and sufficient.
+    isize::MAX as usize - (align - 1)
+}
+
+#[inline]
+fn layout_array(cap: usize, size: usize, align: usize) -> Result<Layout, TryReserveError> {
+    // We need to check two things about the size:
+    //  - That the total size won't overflow a `usize`, and
+    //  - That the total size still fits in an `isize`.
+    // By using division we can check them both with a single threshold.
+    // That'd usually be a bad idea, but thankfully here the element size
+    // and alignment are constants, so the compiler will fold all of it.
+    if size != 0 && cap > max_size_for_align(align) / size {
+        return Err(CapacityOverflow.into());
+    }
+
+    // SAFETY: We just checked that we won't overflow `usize` when we multiply.
+    // This is a useless hint inside this function, but after inlining this helps
+    // deduplicate checks for whether the overall capacity is zero (e.g., in RawVec's
+    // allocation path) before/after this multiplication.
+    let array_size = unsafe { size.unchecked_mul(cap) };
+
+    // SAFETY: We just checked above that the `array_size` will not
+    // exceed `isize::MAX` even when rounded up to the alignment.
+    // And `Alignment` guarantees it's a power of two.
+    unsafe { Ok(Layout::from_size_align_unchecked(array_size, align)) }
+}
+
+impl<A: Allocator> MonoRawVec<A> {
+    #[inline]
+    fn ptr<T>(&self) -> *mut T {
+        unsafe { core::mem::transmute(self.ptr) }
+    }
+
+    #[inline]
+    fn non_null<T>(&self) -> NonNull<T> {
+        unsafe { core::mem::transmute(self.ptr) }
+    }
+
+    /// # Safety:
+    ///
+    /// `cap` must not exceed `isize::MAX`.
+    #[inline]
+    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+        // Allocators currently return a `NonNull<[u8]>` whose length matches
+        // the size requested. If that ever changes, the capacity here should
+        // change to `ptr.len() / mem::size_of::<T>()`.
+        self.ptr = Unique::from(ptr.cast());
+        self.cap = unsafe { Cap(cap) };
+    }
+
+    #[inline]
+    fn current_memory(&self, size: usize, align: usize) -> Option<(NonNull<u8>, Layout)> {
+        if size == 0 || self.cap.0 == 0 {
+            return None;
+        }
+
+        // We could use Layout::array here which ensures the absence of isize and usize overflows
+        // and could hypothetically handle differences between stride and size, but this memory
+        // has already been allocated so we know it can't overflow and currently Rust does not
+        // support such types. So we can do better by skipping some checks and avoid an unwrap.
+        unsafe {
+            let size = size.unchecked_mul(self.cap.0);
+            let layout = Layout::from_size_align_unchecked(size, align);
+            Some((self.ptr.into(), layout))
+        }
+    }
+
+    #[inline]
+    fn grow_amortized(
+        &mut self,
+        len: usize,
+        additional: usize,
+        size: usize,
+        align: usize,
+    ) -> Result<(), TryReserveError> {
+        if size == 0 {
+            // Since we return a capacity of `usize::MAX` when `elem_size` is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow.into());
+        }
+
+        // Nothing we can really do about these checks, sadly.
+        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+        // This guarantees exponential growth. The doubling cannot overflow
+        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+        let cap = cmp::max(self.cap.0 * 2, required_cap);
+        let cap = cmp::max(min_non_zero_cap(size), cap);
+
+        let new_layout = layout_array(cap, size, align)?;
+
+        // `finish_grow` is non-generic over `T`.
+        let ptr = finish_grow(new_layout, self.current_memory(size, align), &mut self.alloc)?;
+        // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+        unsafe { self.set_ptr_and_cap(ptr, cap) };
+        Ok(())
+    }
+
+    #[inline]
+    fn grow_exact(
+        &mut self,
+        len: usize,
+        additional: usize,
+        size: usize,
+        align: usize,
+    ) -> Result<(), TryReserveError> {
+        if size == 0 {
+            // Since we return a capacity of `usize::MAX` when the type size is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow.into());
+        }
+
+        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+        let new_layout = layout_array(cap, size, align)?;
+
+        // `finish_grow` is non-generic over `T`.
+        let ptr = finish_grow(new_layout, self.current_memory(size, align), &mut self.alloc)?;
+        // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+        unsafe {
+            self.set_ptr_and_cap(ptr, cap);
+        }
+        Ok(())
+    }
+
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    fn shrink(&mut self, cap: usize, size: usize, align: usize) -> Result<(), TryReserveError> {
+        if size > 0 {
+            assert!(cap <= self.cap.0, "Tried to shrink to a larger capacity");
+        }
+
+        let (ptr, layout) =
+            if let Some(mem) = self.current_memory(size, align) { mem } else { return Ok(()) };
+
+        // If shrinking to 0, deallocate the buffer. We don't reach this point
+        // for the T::IS_ZST case since current_memory() will have returned
+        // None.
+        if cap == 0 {
+            unsafe { self.alloc.deallocate(ptr, layout) };
+            self.ptr = unsafe { Unique::new_unchecked(ptr::without_provenance_mut(align)) };
+            self.cap = Cap::ZERO;
+        } else {
+            let ptr = unsafe {
+                // `Layout::array` cannot overflow here because it would have
+                // overflowed earlier when capacity was larger.
+                let new_size = size.unchecked_mul(cap);
+                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                self.alloc
+                    .shrink(ptr, layout, new_layout)
+                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+            };
+            // SAFETY: if the allocation is valid, then the capacity is too
+            unsafe {
+                self.set_ptr_and_cap(ptr, cap);
+            }
+        }
+        Ok(())
+    }
+
+    #[inline]
+    pub const fn new_in(alloc: A, align: usize) -> Self {
+        let ptr = unsafe { Unique::new_unchecked(ptr::without_provenance_mut(align)) };
+        // `cap: 0` means "unallocated". zero-sized types are ignored.
+        Self { ptr, cap: Cap::ZERO, alloc }
+    }
+
+    #[inline]
+    fn try_allocate_in(
+        capacity: usize,
+        init: AllocInit,
+        alloc: A,
+        size: usize,
+        align: usize,
+    ) -> Result<Self, TryReserveError> {
+        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+        if size == 0 || capacity == 0 {
+            Ok(Self::new_in(alloc, align))
+        } else {
+            // We avoid `unwrap_or_else` here because it bloats the amount of
+            // LLVM IR generated.
+            let layout = match layout_array(capacity, size, align) {
+                Ok(layout) => layout,
+                Err(_) => return Err(CapacityOverflow.into()),
+            };
+
+            if let Err(err) = alloc_guard(layout.size()) {
+                return Err(err);
+            }
+
+            let result = match init {
+                AllocInit::Uninitialized => alloc.allocate(layout),
+                #[cfg(not(no_global_oom_handling))]
+                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+            };
+            let ptr = match result {
+                Ok(ptr) => ptr,
+                Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
+            };
+
+            // Allocators currently return a `NonNull<[u8]>` whose length
+            // matches the size requested. If that ever changes, the capacity
+            // here should change to `ptr.len() / mem::size_of::<T>()`.
+            Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
+        }
+    }
+
+    #[inline]
+    #[rustc_no_mir_inline]
+    fn drop_if_needed(&mut self, size: usize, align: usize) {
+        if let Some((ptr, layout)) = self.current_memory(size, align) {
+            unsafe { self.alloc.deallocate(ptr, layout) }
+        }
+    }
+}
+
 impl<T> RawVec<T, Global> {
     /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
     /// they cannot call `Self::new()`.
@@ -129,25 +365,28 @@ impl RawVec {
     }
 }
 
+const fn min_non_zero_cap(size: usize) -> usize {
+    if size == 1 {
+        8
+    } else if size <= 1024 {
+        4
+    } else {
+        1
+    }
+}
+
 impl<T, A: Allocator> RawVec<T, A> {
     // Tiny Vecs are dumb. Skip to:
     // - 8 if the element size is 1, because any heap allocators is likely
     //   to round up a request of less than 8 bytes to at least 8 bytes.
     // - 4 if elements are moderate-sized (<= 1 KiB).
     // - 1 otherwise, to avoid wasting too much space for very short Vecs.
-    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
-        8
-    } else if mem::size_of::<T>() <= 1024 {
-        4
-    } else {
-        1
-    };
+    pub(crate) const MIN_NON_ZERO_CAP: usize = min_non_zero_cap(mem::size_of::<T>());
 
     /// Like `new`, but parameterized over the choice of allocator for
     /// the returned `RawVec`.
     pub const fn new_in(alloc: A) -> Self {
-        // `cap: 0` means "unallocated". zero-sized types are ignored.
-        Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
+        Self { inner: MonoRawVec::new_in(alloc, mem::align_of::<T>()), _phantom: PhantomData }
     }
 
     /// Like `with_capacity`, but parameterized over the choice of
@@ -201,7 +440,7 @@ impl RawVec {
         let me = ManuallyDrop::new(self);
         unsafe {
             let slice = ptr::slice_from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
-            Box::from_raw_in(slice, ptr::read(&me.alloc))
+            Box::from_raw_in(slice, ptr::read(&me.inner.alloc))
         }
     }
 
@@ -210,36 +449,15 @@ impl RawVec {
         init: AllocInit,
         alloc: A,
     ) -> Result<Self, TryReserveError> {
-        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
-
-        if T::IS_ZST || capacity == 0 {
-            Ok(Self::new_in(alloc))
-        } else {
-            // We avoid `unwrap_or_else` here because it bloats the amount of
-            // LLVM IR generated.
-            let layout = match Layout::array::<T>(capacity) {
-                Ok(layout) => layout,
-                Err(_) => return Err(CapacityOverflow.into()),
-            };
-
-            if let Err(err) = alloc_guard(layout.size()) {
-                return Err(err);
-            }
-
-            let result = match init {
-                AllocInit::Uninitialized => alloc.allocate(layout),
-                #[cfg(not(no_global_oom_handling))]
-                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
-            };
-            let ptr = match result {
-                Ok(ptr) => ptr,
-                Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
-            };
-
-            // Allocators currently return a `NonNull<[u8]>` whose length
-            // matches the size requested. If that ever changes, the capacity
-            // here should change to `ptr.len() / mem::size_of::<T>()`.
-            Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
+        match MonoRawVec::try_allocate_in(
+            capacity,
+            init,
+            alloc,
+            mem::size_of::<T>(),
+            mem::align_of::<T>(),
+        ) {
+            Ok(inner) => Ok(Self { inner, _phantom: PhantomData }),
+            Err(e) => Err(e),
         }
     }
 
@@ -256,7 +474,10 @@ impl RawVec {
     #[inline]
     pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
         let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
-        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
+        Self {
+            inner: MonoRawVec { ptr: unsafe { Unique::new_unchecked(ptr.cast()) }, cap, alloc },
+            _phantom: PhantomData,
+        }
     }
 
     /// A convenience method for hoisting the non-null precondition out of [`RawVec::from_raw_parts_in`].
@@ -267,7 +488,10 @@ impl RawVec {
     #[inline]
     pub(crate) unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
         let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
-        Self { ptr: Unique::from(ptr), cap, alloc }
+        Self {
+            inner: MonoRawVec { ptr: Unique::from(ptr.cast()), cap, alloc },
+            _phantom: PhantomData,
+        }
     }
 
     /// Gets a raw pointer to the start of the allocation. Note that this is
@@ -275,12 +499,12 @@ impl RawVec {
     /// be careful.
     #[inline]
     pub fn ptr(&self) -> *mut T {
-        self.ptr.as_ptr()
+        self.inner.ptr()
     }
 
     #[inline]
     pub fn non_null(&self) -> NonNull<T> {
-        NonNull::from(self.ptr)
+        self.inner.non_null()
     }
 
     /// Gets the capacity of the allocation.
@@ -288,30 +512,12 @@ impl RawVec {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if T::IS_ZST { usize::MAX } else { self.cap.0 }
+        if T::IS_ZST { usize::MAX } else { self.inner.cap.0 }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
     pub fn allocator(&self) -> &A {
-        &self.alloc
-    }
-
-    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
-        if T::IS_ZST || self.cap.0 == 0 {
-            None
-        } else {
-            // We could use Layout::array here which ensures the absence of isize and usize overflows
-            // and could hypothetically handle differences between stride and size, but this memory
-            // has already been allocated so we know it can't overflow and currently Rust does not
-            // support such types. So we can do better by skipping some checks and avoid an unwrap.
-            const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
-            unsafe {
-                let align = mem::align_of::<T>();
-                let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
-                let layout = Layout::from_size_align_unchecked(size, align);
-                Some((self.ptr.cast().into(), layout))
-            }
-        }
+        &self.inner.alloc
     }
 
     /// Ensures that the buffer contains at least enough space to hold `len +
@@ -361,7 +567,7 @@ impl RawVec {
     #[cfg(not(no_global_oom_handling))]
     #[inline(never)]
     pub fn grow_one(&mut self) {
-        if let Err(err) = self.grow_amortized(self.cap.0, 1) {
+        if let Err(err) = self.grow_amortized(self.inner.cap.0, 1) {
             handle_error(err);
         }
     }
@@ -443,17 +649,6 @@ impl RawVec {
         additional > self.capacity().wrapping_sub(len)
     }
 
-    /// # Safety:
-    ///
-    /// `cap` must not exceed `isize::MAX`.
-    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
-        // Allocators currently return a `NonNull<[u8]>` whose length matches
-        // the size requested. If that ever changes, the capacity here should
-        // change to `ptr.len() / mem::size_of::<T>()`.
-        self.ptr = Unique::from(ptr.cast());
-        self.cap = unsafe { Cap(cap) };
-    }
-
     // This method is usually instantiated many times. So we want it to be as
     // small as possible, to improve compile times. But we also want as much of
     // its contents to be statically computable as possible, to make the
@@ -462,85 +657,26 @@ impl RawVec {
     // of the code that doesn't depend on `T` as possible is in functions that
     // are non-generic over `T`.
     fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+        const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
+
         // This is ensured by the calling contexts.
         debug_assert!(additional > 0);
 
-        if T::IS_ZST {
-            // Since we return a capacity of `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            return Err(CapacityOverflow.into());
-        }
-
-        // Nothing we can really do about these checks, sadly.
-        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
-
-        // This guarantees exponential growth. The doubling cannot overflow
-        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
-        let cap = cmp::max(self.cap.0 * 2, required_cap);
-        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
-
-        let new_layout = Layout::array::<T>(cap);
-
-        // `finish_grow` is non-generic over `T`.
-        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
-        // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
-        unsafe { self.set_ptr_and_cap(ptr, cap) };
-        Ok(())
+        self.inner.grow_amortized(len, additional, mem::size_of::<T>(), mem::align_of::<T>())
     }
 
     // The constraints on this method are much the same as those on
     // `grow_amortized`, but this method is usually instantiated less often so
     // it's less critical.
     fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
-        if T::IS_ZST {
-            // Since we return a capacity of `usize::MAX` when the type size is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            return Err(CapacityOverflow.into());
-        }
-
-        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
-        let new_layout = Layout::array::<T>(cap);
-
-        // `finish_grow` is non-generic over `T`.
-        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
-        // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
-        unsafe {
-            self.set_ptr_and_cap(ptr, cap);
-        }
-        Ok(())
+        const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
+        self.inner.grow_exact(len, additional, mem::size_of::<T>(), mem::align_of::<T>())
     }
 
     #[cfg(not(no_global_oom_handling))]
     fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
-        assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
-
-        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-        // See current_memory() why this assert is here
         const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
-
-        // If shrinking to 0, deallocate the buffer. We don't reach this point
-        // for the T::IS_ZST case since current_memory() will have returned
-        // None.
-        if cap == 0 {
-            unsafe { self.alloc.deallocate(ptr, layout) };
-            self.ptr = Unique::dangling();
-            self.cap = Cap::ZERO;
-        } else {
-            let ptr = unsafe {
-                // `Layout::array` cannot overflow here because it would have
-                // overflowed earlier when capacity was larger.
-                let new_size = mem::size_of::<T>().unchecked_mul(cap);
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
-                self.alloc
-                    .shrink(ptr, layout, new_layout)
-                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-            };
-            // SAFETY: if the allocation is valid, then the capacity is too
-            unsafe {
-                self.set_ptr_and_cap(ptr, cap);
-            }
-        }
-        Ok(())
+        self.inner.shrink(cap, mem::size_of::<T>(), mem::align_of::<T>())
     }
 }
 
@@ -550,16 +686,13 @@ impl RawVec {
 // much smaller than the number of `T` types.)
 #[inline(never)]
 fn finish_grow<A>(
-    new_layout: Result<Layout, LayoutError>,
+    new_layout: Layout,
     current_memory: Option<(NonNull<u8>, Layout)>,
     alloc: &mut A,
 ) -> Result<NonNull<[u8]>, TryReserveError>
 where
     A: Allocator,
 {
-    // Check for the error here to minimize the size of `RawVec::grow_*`.
-    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
-
     alloc_guard(new_layout.size())?;
 
     let memory = if let Some((ptr, old_layout)) = current_memory {
@@ -579,9 +712,8 @@ where
 unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
     /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
     fn drop(&mut self) {
-        if let Some((ptr, layout)) = self.current_memory() {
-            unsafe { self.alloc.deallocate(ptr, layout) }
-        }
+        const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
+        self.inner.drop_if_needed(mem::size_of::<T>(), mem::align_of::<T>());
     }
 }
 
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index 4194be530612d..ae97ad716f901 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -1,5 +1,5 @@
 use super::*;
-use core::mem::size_of;
+use core::mem::{align_of, size_of};
 use std::cell::Cell;
 
 #[test]
@@ -42,9 +42,9 @@ fn allocator_param() {
     let a = BoundedAlloc { fuel: Cell::new(500) };
     let mut v: RawVec<u8, BoundedAlloc> = RawVec::with_capacity_in(50, a);
-    assert_eq!(v.alloc.fuel.get(), 450);
+    assert_eq!(v.inner.alloc.fuel.get(), 450);
     v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
-    assert_eq!(v.alloc.fuel.get(), 250);
+    assert_eq!(v.inner.alloc.fuel.get(), 250);
 }
 
 #[test]
@@ -85,14 +85,14 @@ struct ZST;
 fn zst_sanity<T>(v: &RawVec<T>) {
     assert_eq!(v.capacity(), usize::MAX);
     assert_eq!(v.ptr(), core::ptr::Unique::<T>::dangling().as_ptr());
-    assert_eq!(v.current_memory(), None);
+    assert_eq!(v.inner.current_memory(size_of::<T>(), align_of::<T>()), None);
 }
 
 #[test]
 fn zst() {
     let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
 
-    assert_eq!(std::mem::size_of::<ZST>(), 0);
+    assert_eq!(size_of::<ZST>(), 0);
 
     // All these different ways of creating the RawVec produce the same thing.
 
diff --git a/tests/mir-opt/inline/inline_shims.drop.Inline.panic-abort.diff b/tests/mir-opt/inline/inline_shims.drop.Inline.panic-abort.diff
index 45ce933a55ad2..ae171389b8184 100644
--- a/tests/mir-opt/inline/inline_shims.drop.Inline.panic-abort.diff
+++ b/tests/mir-opt/inline/inline_shims.drop.Inline.panic-abort.diff
@@ -18,21 +18,19 @@
 +      scope 3 (inlined Vec::::as_mut_ptr) {
 +          let mut _11: &alloc::raw_vec::RawVec;
 +          scope 4 (inlined alloc::raw_vec::RawVec::::ptr) {
-+              let mut _13: std::ptr::NonNull;
-+              scope 5 (inlined Unique::::as_ptr) {
-+                  scope 6 (inlined NonNull::::as_ptr) {
-+                      let mut _12: *const A;
-+                  }
++              let mut _12: &alloc::raw_vec::MonoRawVec;
++              scope 5 (inlined alloc::raw_vec::MonoRawVec::ptr::) {
++                  let mut _13: std::ptr::Unique;
 +              }
 +          }
 +      }
-+      scope 7 (inlined slice_from_raw_parts_mut::) {
-+          scope 8 (inlined std::ptr::from_raw_parts_mut::<[A], A>) {
++      scope 6 (inlined slice_from_raw_parts_mut::) {
++          scope 7 (inlined std::ptr::from_raw_parts_mut::<[A], A>) {
 +          }
 +      }
 +  }
 +  }
-+  scope 9 (inlined std::ptr::drop_in_place::> - shim(Some(Option))) {
++  scope 8 (inlined std::ptr::drop_in_place::> - shim(Some(Option))) {
 +      let mut _14: isize;
 +      let mut _15: isize;
 +  }
@@ -49,13 +47,13 @@
 +      StorageLive(_9);
 +      StorageLive(_11);
 +      _11 = &((*_6).0: alloc::raw_vec::RawVec);
-+      StorageLive(_13);
-+      _13 = ((((*_6).0: alloc::raw_vec::RawVec).0: std::ptr::Unique).0: std::ptr::NonNull);
 +      StorageLive(_12);
-+      _12 = (_13.0: *const A);
-+      _9 = move _12 as *mut A (PtrToPtr);
-+      StorageDead(_12);
++      _12 = &(((*_6).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec);
++      StorageLive(_13);
++      _13 = ((((*_6).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec).0: std::ptr::Unique);
++      _9 = move _13 as *mut A (Transmute);
 +      StorageDead(_13);
++      StorageDead(_12);
 +      StorageDead(_11);
 +      StorageLive(_10);
 +      _10 = ((*_6).1: usize);
diff --git a/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-abort.mir
index 1c9ed25d7f2b2..ab348d3eae17b 100644
--- a/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-abort.mir
+++ b/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-abort.mir
@@ -5,61 +5,65 @@ fn vec_deref_to_slice(_1: &Vec) -> &[u8] {
     let mut _0: &[u8];
     scope 1 (inlined as Deref>::deref) {
         debug self => _1;
-        let mut _4: *const u8;
-        let mut _5: usize;
+        let mut _6: *const u8;
+        let mut _7: usize;
         scope 2 (inlined Vec::::as_ptr) {
             debug self => _1;
             let mut _2: &alloc::raw_vec::RawVec;
+            let mut _5: *mut u8;
             scope 3 (inlined alloc::raw_vec::RawVec::::ptr) {
                 debug self => _2;
-                let mut _3: std::ptr::NonNull;
-                scope 4 (inlined Unique::::as_ptr) {
-                    debug ((self: Unique).0: std::ptr::NonNull) => _3;
-                    debug ((self: Unique).1: std::marker::PhantomData) => const PhantomData::;
-                    scope 5 (inlined NonNull::::as_ptr) {
-                        debug self => _3;
-                    }
+                let mut _3: &alloc::raw_vec::MonoRawVec;
+                scope 4 (inlined alloc::raw_vec::MonoRawVec::ptr::) {
+                    debug self => _3;
+                    let mut _4: std::ptr::Unique;
                 }
             }
         }
-        scope 6 (inlined std::slice::from_raw_parts::<'_, u8>) {
-            debug data => _4;
-            debug len => _5;
-            let _6: *const [u8];
-            scope 7 (inlined core::ub_checks::check_language_ub) {
-                scope 8 (inlined core::ub_checks::check_language_ub::runtime) {
+        scope 5 (inlined std::slice::from_raw_parts::<'_, u8>) {
+            debug data => _6;
+            debug len => _7;
+            let _8: *const [u8];
+            scope 6 (inlined core::ub_checks::check_language_ub) {
+                scope 7 (inlined core::ub_checks::check_language_ub::runtime) {
                 }
             }
-            scope 9 (inlined std::mem::size_of::) {
+            scope 8 (inlined std::mem::size_of::) {
            }
-            scope 10 (inlined align_of::) {
+            scope 9 (inlined align_of::) {
            }
-            scope 11 (inlined slice_from_raw_parts::) {
-                debug data => _4;
-                debug len => _5;
-                scope 12 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
-                    debug data_pointer => _4;
-                    debug metadata => _5;
+            scope 10 (inlined slice_from_raw_parts::) {
+                debug data => _6;
+                debug len => _7;
+                scope 11 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
+                    debug data_pointer => _6;
+                    debug metadata => _7;
                }
            }
        }
    }
 
     bb0: {
-        StorageLive(_4);
+        StorageLive(_5);
+        StorageLive(_6);
         StorageLive(_2);
         _2 = &((*_1).0: alloc::raw_vec::RawVec);
         StorageLive(_3);
-        _3 = ((((*_1).0: alloc::raw_vec::RawVec).0: std::ptr::Unique).0: std::ptr::NonNull);
-        _4 = (_3.0: *const u8);
+        _3 = &(((*_1).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec);
+        StorageLive(_4);
+        _4 = ((((*_1).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec).0: std::ptr::Unique);
+        _5 = move _4 as *mut u8 (Transmute);
+        StorageDead(_4);
         StorageDead(_3);
+        _6 = _5 as *const u8 (PointerCoercion(MutToConstPointer));
         StorageDead(_2);
-        StorageLive(_5);
-        _5 = ((*_1).1: usize);
-        _6 = *const [u8] from (_4, _5);
+        StorageLive(_7);
+        _7 = ((*_1).1: usize);
+        _8 = *const [u8] from (_6, _7);
+        StorageDead(_7);
+        StorageDead(_6);
         StorageDead(_5);
-        StorageDead(_4);
-        _0 = &(*_6);
+        _0 = &(*_8);
         return;
    }
 }
diff --git a/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-unwind.mir
index 1c9ed25d7f2b2..ab348d3eae17b 100644
--- a/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-unwind.mir
+++ b/tests/mir-opt/pre-codegen/vec_deref.vec_deref_to_slice.PreCodegen.after.panic-unwind.mir
@@ -5,61 +5,65 @@ fn vec_deref_to_slice(_1: &Vec) -> &[u8] {
     let mut _0: &[u8];
     scope 1 (inlined as Deref>::deref) {
         debug self => _1;
-        let mut _4: *const u8;
-        let mut _5: usize;
+        let mut _6: *const u8;
+        let mut _7: usize;
         scope 2 (inlined Vec::::as_ptr) {
             debug self => _1;
             let mut _2: &alloc::raw_vec::RawVec;
+            let mut _5: *mut u8;
             scope 3 (inlined alloc::raw_vec::RawVec::::ptr) {
                 debug self => _2;
-                let mut _3: std::ptr::NonNull;
-                scope 4 (inlined Unique::::as_ptr) {
-                    debug ((self: Unique).0: std::ptr::NonNull) => _3;
-                    debug ((self: Unique).1: std::marker::PhantomData) => const PhantomData::;
-                    scope 5 (inlined NonNull::::as_ptr) {
-                        debug self => _3;
-                    }
+                let mut _3: &alloc::raw_vec::MonoRawVec;
+                scope 4 (inlined alloc::raw_vec::MonoRawVec::ptr::) {
+                    debug self => _3;
+                    let mut _4: std::ptr::Unique;
                 }
             }
         }
-        scope 6 (inlined std::slice::from_raw_parts::<'_, u8>) {
-            debug data => _4;
-            debug len => _5;
-            let _6: *const [u8];
-            scope 7 (inlined core::ub_checks::check_language_ub) {
-                scope 8 (inlined core::ub_checks::check_language_ub::runtime) {
+        scope 5 (inlined std::slice::from_raw_parts::<'_, u8>) {
+            debug data => _6;
+            debug len => _7;
+            let _8: *const [u8];
+            scope 6 (inlined core::ub_checks::check_language_ub) {
+                scope 7 (inlined core::ub_checks::check_language_ub::runtime) {
                 }
             }
-            scope 9 (inlined std::mem::size_of::) {
+            scope 8 (inlined std::mem::size_of::) {
            }
-            scope 10 (inlined align_of::) {
+            scope 9 (inlined align_of::) {
            }
-            scope 11 (inlined slice_from_raw_parts::) {
-                debug data => _4;
-                debug len => _5;
-                scope 12 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
-                    debug data_pointer => _4;
-                    debug metadata => _5;
+            scope 10 (inlined slice_from_raw_parts::) {
+                debug data => _6;
+                debug len => _7;
+                scope 11 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
+                    debug data_pointer => _6;
+                    debug metadata => _7;
                }
            }
        }
    }
 
     bb0: {
-        StorageLive(_4);
+        StorageLive(_5);
+        StorageLive(_6);
         StorageLive(_2);
         _2 = &((*_1).0: alloc::raw_vec::RawVec);
         StorageLive(_3);
-        _3 = ((((*_1).0: alloc::raw_vec::RawVec).0: std::ptr::Unique).0: std::ptr::NonNull);
-        _4 = (_3.0: *const u8);
+        _3 = &(((*_1).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec);
+        StorageLive(_4);
+        _4 = ((((*_1).0: alloc::raw_vec::RawVec).0: alloc::raw_vec::MonoRawVec).0: std::ptr::Unique);
+        _5 = move _4 as *mut u8 (Transmute);
+        StorageDead(_4);
         StorageDead(_3);
+        _6 = _5 as *const u8 (PointerCoercion(MutToConstPointer));
         StorageDead(_2);
-        StorageLive(_5);
-        _5 = ((*_1).1: usize);
-        _6 = *const [u8] from (_4, _5);
+        StorageLive(_7);
+        _7 = ((*_1).1: usize);
+        _8 = *const [u8] from (_6, _7);
+        StorageDead(_7);
+        StorageDead(_6);
         StorageDead(_5);
-        StorageDead(_4);
-        _0 = &(*_6);
+        _0 = &(*_8);
         return;
    }
 }
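
For illustration only, not part of the patch itself: the sketch below shows the general shape of the change under simplified assumptions. The names RawBuf and RawBufInner are invented for this example, and it talks to the global allocator directly instead of the Allocator trait. The point is just that the generic front end passes size_of::<T>() and align_of::<T>() as ordinary runtime values, so the allocation and deallocation logic is compiled once rather than once per element type.

use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::marker::PhantomData;
use std::mem::{align_of, size_of};

// Non-generic core: one copy of this code is shared by every element type.
struct RawBufInner {
    ptr: *mut u8,
    cap: usize,
}

impl RawBufInner {
    fn allocate(cap: usize, size: usize, align: usize) -> Self {
        // Zero-sized elements and zero capacity never allocate; use an
        // aligned dangling pointer instead.
        if size == 0 || cap == 0 {
            return Self { ptr: align as *mut u8, cap: 0 };
        }
        let bytes = size.checked_mul(cap).expect("capacity overflow");
        let layout = Layout::from_size_align(bytes, align).expect("invalid layout");
        // SAFETY: `layout` has non-zero size here.
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        Self { ptr, cap }
    }

    fn deallocate(&mut self, size: usize, align: usize) {
        if size != 0 && self.cap != 0 {
            let layout = Layout::from_size_align(size * self.cap, align).unwrap();
            // SAFETY: `ptr` was allocated above with this same layout.
            unsafe { dealloc(self.ptr, layout) };
        }
    }
}

// Thin generic wrapper: only this part is instantiated per `T`.
struct RawBuf<T> {
    inner: RawBufInner,
    _marker: PhantomData<T>,
}

impl<T> RawBuf<T> {
    fn with_capacity(cap: usize) -> Self {
        // The element's size and alignment are forwarded as plain values,
        // so `RawBufInner::allocate` is not generic over `T`.
        let inner = RawBufInner::allocate(cap, size_of::<T>(), align_of::<T>());
        Self { inner, _marker: PhantomData }
    }

    fn ptr(&self) -> *mut T {
        self.inner.ptr as *mut T
    }
}

impl<T> Drop for RawBuf<T> {
    fn drop(&mut self) {
        self.inner.deallocate(size_of::<T>(), align_of::<T>());
    }
}

fn main() {
    let buf: RawBuf<u64> = RawBuf::with_capacity(16);
    assert!(!buf.ptr().is_null());
}

The patch applies this same split to RawVec via the MonoRawVec helper, which is why the MIR expectations above now read the pointer through a single Unique field and a transmute instead of per-T pointer projections.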