mirror of https://github.com/torvalds/linux.git
synced 2025-10-31 16:48:26 +02:00
rust: alloc: remove our fork of the alloc crate

It is not used anymore as `VecExt` now provides the functionality we
depend on.

Reviewed-by: Benno Lossin <benno.lossin@proton.me>
Signed-off-by: Wedson Almeida Filho <walmeida@microsoft.com>
Link: https://lore.kernel.org/r/20240328013603.206764-5-wedsonaf@gmail.com
Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
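The `VecExt` mentioned here is the kernel-side extension trait that adds fallible operations to `Vec`. As a rough, std-based sketch of the idea (the trait name and method mirror the kernel's, but the exact signatures here are illustrative assumptions, not the kernel's definition):

```rust
use std::collections::TryReserveError;

// Assumed shape, for illustration only; the in-kernel trait returns kernel
// error types and covers more methods (fallible construction, extension, ...).
trait VecExt<T> {
    fn try_push(&mut self, v: T) -> Result<(), TryReserveError>;
}

impl<T> VecExt<T> for Vec<T> {
    fn try_push(&mut self, v: T) -> Result<(), TryReserveError> {
        // Reserve fallibly first, so the subsequent push can never abort on OOM.
        self.try_reserve(1)?;
        self.push(v);
        Ok(())
    }
}

fn main() -> Result<(), TryReserveError> {
    let mut v = Vec::new();
    v.try_push(42u32)?;
    assert_eq!(v, [42]);
    Ok(())
}
```

With fallible operations available on the plain `Vec`, the in-tree copy of `alloc` below has no remaining users and can be deleted wholesale.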
This commit is contained in:

parent 11795ae4cc
commit 9d0441bab7

15 changed files with 0 additions and 9885 deletions
rust/alloc/README.md
@@ -1,36 +0,0 @@
# `alloc`

These source files come from the Rust standard library, hosted in
the <https://github.com/rust-lang/rust> repository, licensed under
"Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.

Please note that these files should be kept as close as possible to
upstream. In general, only additions should be performed (e.g. new
methods). Eventually, changes should make it into upstream so that,
at some point, this fork can be dropped from the kernel tree.

The Rust upstream version on top of which these files are based matches
the output of `scripts/min-tool-version.sh rustc`.


## Rationale

On one hand, kernel folks wanted to keep `alloc` in-tree to have more
freedom in both workflow and actual features if actually needed
(e.g. receiver types if we ended up using them), which is reasonable.

On the other hand, Rust folks wanted to keep `alloc` as close to
upstream as possible and avoid as much divergence as possible, which
is also reasonable.

We agreed on a middle ground: we would keep a subset of `alloc`
in-tree that would be as small and as close as possible to upstream.
Then, upstream can start adding the functions that we add to `alloc`
etc., until we reach a point where the kernel already knows exactly
what it needs in `alloc` and all the new methods are merged into
upstream, so that we can drop `alloc` from the kernel tree and go back
to using the upstream one.

By doing this, the kernel can go a bit faster now, and Rust can
slowly incorporate and discuss the changes as needed.
rust/alloc/alloc.rs
@@ -1,452 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

//! Memory allocation APIs

#![stable(feature = "alloc_module", since = "1.28.0")]

#[cfg(not(test))]
use core::hint;

#[cfg(not(test))]
use core::ptr::{self, NonNull};

#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use core::alloc::*;

#[cfg(test)]
mod tests;

extern "Rust" {
    // These are the magic symbols to call the global allocator. rustc generates
    // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
    // (the code expanding that attribute macro generates those functions), or to call
    // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
    // otherwise.
    // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
    // like `malloc`, `realloc`, and `free`, respectively.
    #[rustc_allocator]
    #[rustc_nounwind]
    fn __rust_alloc(size: usize, align: usize) -> *mut u8;
    #[rustc_deallocator]
    #[rustc_nounwind]
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
    #[rustc_reallocator]
    #[rustc_nounwind]
    fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
    #[rustc_allocator_zeroed]
    #[rustc_nounwind]
    fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;

    static __rust_no_alloc_shim_is_unstable: u8;
}
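These shims are the seam between `alloc` and whatever allocator the final binary registers: defining a `#[global_allocator]` makes rustc emit `__rust_alloc` and friends as forwarders to it. A std-based illustration (the `Counting` type is a made-up example, not kernel code):

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical example allocator: forwards to the system allocator while
// counting allocations. rustc generates `__rust_alloc` etc. to call into it.
struct Counting;

static ALLOCS: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for Counting {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOCS.fetch_add(1, Ordering::Relaxed);
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

#[global_allocator]
static GLOBAL: Counting = Counting;

fn main() {
    let v = vec![1, 2, 3]; // served by `Counting` via `__rust_alloc`
    assert_eq!(v.len(), 3);
    assert!(ALLOCS.load(Ordering::Relaxed) >= 1);
}
```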

/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](self#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
#[cfg(not(test))]
pub struct Global;

#[cfg(test)]
pub use std::alloc::Global;

/// Allocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `alloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc(layout);
///     if ptr.is_null() {
///         handle_alloc_error(layout);
///     }
///
///     *(ptr as *mut u16) = 42;
///     assert_eq!(*(ptr as *mut u16), 42);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    unsafe {
        // Make sure we don't accidentally allow omitting the allocator shim in
        // stable code until it is actually stabilized.
        core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);

        __rust_alloc(layout.size(), layout.align())
    }
}

/// Deallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `dealloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
}

/// Reallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::realloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `realloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::realloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
}

/// Allocate zero-initialized memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc_zeroed, dealloc, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc_zeroed(layout);
///
///     assert_eq!(*(ptr as *mut u16), 0);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
    unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
}

#[cfg(not(test))]
impl Global {
    #[inline]
    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
        match layout.size() {
            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
            // SAFETY: `layout` is non-zero in size,
            size => unsafe {
                let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, size))
            },
        }
    }

    // SAFETY: Same as `Allocator::grow`
    #[inline]
    unsafe fn grow_impl(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
        zeroed: bool,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        match old_layout.size() {
            0 => self.alloc_impl(new_layout, zeroed),

            // SAFETY: `new_size` is non-zero as it is greater than or equal to `old_size`,
            // which is non-zero in this branch. Other conditions must be upheld by the caller
            old_size if old_layout.align() == new_layout.align() => unsafe {
                let new_size = new_layout.size();

                // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
                hint::assert_unchecked(new_size >= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                if zeroed {
                    raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
                }
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
            // both the old and new memory allocation are valid for reads and writes for `old_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            old_size => unsafe {
                let new_ptr = self.alloc_impl(new_layout, zeroed)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
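The behavior of `alloc_impl` above is observable through the unstable `Allocator` trait: zero-sized requests never reach the underlying allocator, while everything else round-trips through the `__rust_*` shims. A nightly-only sketch:

```rust
#![feature(allocator_api)]

use std::alloc::{Allocator, Global, Layout};

fn main() {
    // Zero-sized layouts are served with a dangling, zero-length slice.
    let zst = Global.allocate(Layout::new::<()>()).unwrap();
    assert_eq!(zst.len(), 0);

    // Non-zero layouts go through the global allocator.
    let layout = Layout::array::<u32>(4).unwrap();
    let ptr = Global.allocate(layout).expect("allocation failed");
    assert_eq!(ptr.len(), layout.size());
    unsafe { Global.deallocate(ptr.cast(), layout) };
}
```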

#[unstable(feature = "allocator_api", issue = "32838")]
#[cfg(not(test))]
unsafe impl Allocator for Global {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, false)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, true)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { dealloc(ptr.as_ptr(), layout) }
        }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        match new_layout.size() {
            // SAFETY: conditions must be upheld by the caller
            0 => unsafe {
                self.deallocate(ptr, old_layout);
                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
            },

            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
            new_size if old_layout.align() == new_layout.align() => unsafe {
                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
                hint::assert_unchecked(new_size <= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
            // both the old and new memory allocation are valid for reads and writes for `new_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            new_size => unsafe {
                let new_ptr = self.allocate(new_layout)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}

/// The allocator for unique pointers.
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
    match Global.allocate(layout) {
        Ok(ptr) => ptr.as_mut_ptr(),
        Err(_) => handle_alloc_error(layout),
    }
}
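This lang item is, roughly speaking, what a plain `Box::new` bottoms out in; the exact lowering is compiler-internal, but the observable behavior is just an allocation plus a move:

```rust
fn main() {
    // `Box::new(value)` obtains storage via the `exchange_malloc` lang item
    // (which calls `Global.allocate`), then moves `value` into that storage.
    let b = Box::new([0u8; 64]); // one 64-byte heap allocation
    assert_eq!(b.len(), 64);
}
```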

// # Allocation error handler

#[cfg(not(no_global_oom_handling))]
extern "Rust" {
    // This is the magic symbol to call the global alloc error handler. rustc generates
    // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
    // default implementations below (`__rdl_oom`) otherwise.
    fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
}

/// Signal a memory allocation error.
///
/// Callers of memory allocation APIs wishing to cease execution
/// in response to an allocation error are encouraged to call this function,
/// rather than directly invoking [`panic!`] or similar.
///
/// This function is guaranteed to diverge (not return normally with a value), but depending on
/// global configuration, it may either panic (resulting in unwinding or aborting as per
/// configuration for all panics), or abort the process (with no unwinding).
///
/// The default behavior is:
///
/// * If the binary links against `std` (typically the case), then
///   print a message to standard error and abort the process.
///   This behavior can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
///   Future versions of Rust may panic by default instead.
///
/// * If the binary does not link against `std` (all of its crates are marked
///   [`#![no_std]`][no_std]), then call [`panic!`] with a message.
///   [The panic handler] applies as to any panic.
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
/// [The panic handler]: https://doc.rust-lang.org/reference/runtime.html#the-panic_handler-attribute
/// [no_std]: https://doc.rust-lang.org/reference/names/preludes.html#the-no_std-attribute
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[cold]
pub const fn handle_alloc_error(layout: Layout) -> ! {
    const fn ct_error(_: Layout) -> ! {
        panic!("allocation failed");
    }

    #[inline]
    fn rt_error(layout: Layout) -> ! {
        unsafe {
            __rust_alloc_error_handler(layout.size(), layout.align());
        }
    }

    #[cfg(not(feature = "panic_immediate_abort"))]
    unsafe {
        core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
    }

    #[cfg(feature = "panic_immediate_abort")]
    ct_error(layout)
}

// For alloc test `std::alloc::handle_alloc_error` can be used directly.
#[cfg(all(not(no_global_oom_handling), test))]
pub use std::alloc::handle_alloc_error;

#[cfg(all(not(no_global_oom_handling), not(test)))]
#[doc(hidden)]
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
    // called via generated `__rust_alloc_error_handler` if there is no
    // `#[alloc_error_handler]`.
    #[rustc_std_internal_symbol]
    pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
        extern "Rust" {
            // This symbol is emitted by rustc next to __rust_alloc_error_handler.
            // Its value depends on the -Zoom={panic,abort} compiler option.
            static __rust_alloc_error_handler_should_panic: u8;
        }

        if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
            panic!("memory allocation of {size} bytes failed")
        } else {
            core::panicking::panic_nounwind_fmt(
                format_args!("memory allocation of {size} bytes failed"),
                /* force_no_backtrace */ false,
            )
        }
    }
}

#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
    unsafe fn write_clone_into_raw(&self, target: *mut Self);
}

#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
    #[inline]
    default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // Having allocated *first* may allow the optimizer to create
        // the cloned value in-place, skipping the local and move.
        unsafe { target.write(self.clone()) };
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
    #[inline]
    unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // We can always copy in-place, without ever involving a local value.
        unsafe { target.copy_from_nonoverlapping(self, 1) };
    }
}
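The specialization above exists so a clone is constructed directly in its final storage. A stable-Rust sketch of the same idea, without the `default` specialization machinery:

```rust
use std::mem::MaybeUninit;

// Clone straight into pre-allocated, uninitialized storage, so the optimizer
// can build the value in place instead of cloning to a local and moving it.
//
// SAFETY contract (as in the trait above): `target` must be valid for writes.
unsafe fn write_clone_into_raw<T: Clone>(src: &T, target: *mut T) {
    unsafe { target.write(src.clone()) }
}

fn main() {
    let mut slot = MaybeUninit::<String>::uninit();
    unsafe { write_clone_into_raw(&String::from("hi"), slot.as_mut_ptr()) };
    let s = unsafe { slot.assume_init() };
    assert_eq!(s, "hi");
}
```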
rust/alloc/boxed.rs
2463 deleted lines (file diff suppressed because it is too large)

rust/alloc/collections/mod.rs
@@ -1,160 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

//! Collection types.

#![stable(feature = "rust1", since = "1.0.0")]

#[cfg(not(no_global_oom_handling))]
pub mod binary_heap;
#[cfg(not(no_global_oom_handling))]
mod btree;
#[cfg(not(no_global_oom_handling))]
pub mod linked_list;
#[cfg(not(no_global_oom_handling))]
pub mod vec_deque;

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_map {
    //! An ordered map based on a B-Tree.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::btree::map::*;
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_set {
    //! An ordered set based on a B-Tree.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::btree::set::*;
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use binary_heap::BinaryHeap;

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_map::BTreeMap;

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_set::BTreeSet;

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use linked_list::LinkedList;

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use vec_deque::VecDeque;

use crate::alloc::{Layout, LayoutError};
use core::fmt::Display;

/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "try_reserve", since = "1.57.0")]
pub struct TryReserveError {
    kind: TryReserveErrorKind,
}

impl TryReserveError {
    /// Details about the allocation that caused the error
    #[inline]
    #[must_use]
    #[unstable(
        feature = "try_reserve_kind",
        reason = "Uncertain how much info should be exposed",
        issue = "48043"
    )]
    pub fn kind(&self) -> TryReserveErrorKind {
        self.kind.clone()
    }
}

/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(
    feature = "try_reserve_kind",
    reason = "Uncertain how much info should be exposed",
    issue = "48043"
)]
pub enum TryReserveErrorKind {
    /// Error due to the computed capacity exceeding the collection's maximum
    /// (usually `isize::MAX` bytes).
    CapacityOverflow,

    /// The memory allocator returned an error
    AllocError {
        /// The layout of allocation request that failed
        layout: Layout,

        #[doc(hidden)]
        #[unstable(
            feature = "container_error_extra",
            issue = "none",
            reason = "\
            Enable exposing the allocator’s custom error value \
            if an associated type is added in the future: \
            https://github.com/rust-lang/wg-allocators/issues/23"
        )]
        non_exhaustive: (),
    },
}

#[unstable(
    feature = "try_reserve_kind",
    reason = "Uncertain how much info should be exposed",
    issue = "48043"
)]
impl From<TryReserveErrorKind> for TryReserveError {
    #[inline]
    fn from(kind: TryReserveErrorKind) -> Self {
        Self { kind }
    }
}

#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
impl From<LayoutError> for TryReserveErrorKind {
    /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
    #[inline]
    fn from(_: LayoutError) -> Self {
        TryReserveErrorKind::CapacityOverflow
    }
}

#[stable(feature = "try_reserve", since = "1.57.0")]
impl Display for TryReserveError {
    fn fmt(
        &self,
        fmt: &mut core::fmt::Formatter<'_>,
    ) -> core::result::Result<(), core::fmt::Error> {
        fmt.write_str("memory allocation failed")?;
        let reason = match self.kind {
            TryReserveErrorKind::CapacityOverflow => {
                " because the computed capacity exceeded the collection's maximum"
            }
            TryReserveErrorKind::AllocError { .. } => {
                " because the memory allocator returned an error"
            }
        };
        fmt.write_str(reason)
    }
}
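Both error paths are observable from stable code through `Vec::try_reserve`, which surfaces this type:

```rust
fn main() {
    let mut v: Vec<u8> = Vec::new();
    // Requesting more than `isize::MAX` bytes trips the capacity check
    // before the allocator is even consulted: CapacityOverflow.
    let err = v.try_reserve(usize::MAX).unwrap_err();
    // Prints: "memory allocation failed because the computed capacity
    // exceeded the collection's maximum"
    println!("{err}");
}
```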

/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
    /// Extends `self` with the contents of the given iterator.
    fn spec_extend(&mut self, iter: I);
}

#[stable(feature = "try_reserve", since = "1.57.0")]
impl core::error::Error for TryReserveError {}
rust/alloc/lib.rs
@@ -1,289 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

//! # The Rust core allocation and collections library
//!
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
//! This library, like core, normally doesn’t need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they’d use this crate instead.
//!
//! ## Boxed values
//!
//! The [`Box`] type is a smart pointer type. There can only be one owner of a
//! [`Box`], and the owner can decide to mutate the contents, which live on the
//! heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
//! ## Reference counted pointers
//!
//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
//! only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using [`Box`]) is too
//! constraining for an application, and is often paired with the [`Cell`] or
//! [`RefCell`] types in order to allow mutation.
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
//! provides all the same functionality of [`Rc`], except it requires that the
//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
//! sendable while [`Rc<T>`][`Rc`] is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
//! ## Collections
//!
//! Implementations of the most common general purpose data structures are
//! defined in this library. They are re-exported through the
//! [standard collections library](../std/collections/index.html).
//!
//! ## Heap interfaces
//!
//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
//!
//! [`Arc`]: sync
//! [`Box`]: boxed
//! [`Cell`]: core::cell
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
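A small illustration of the `Rc` plus `RefCell` pairing described above (std-based sketch):

```rust
use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    // Shared ownership within one thread; `RefCell` supplies the mutation
    // that `Rc`'s shared `&T` access alone would forbid.
    let shared = Rc::new(RefCell::new(vec![1, 2]));
    let other = Rc::clone(&shared);
    other.borrow_mut().push(3);
    assert_eq!(*shared.borrow(), [1, 2, 3]);
}
```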

// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
    html_playground_url = "https://play.rust-lang.org/",
    issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
    test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
)]
#![doc(cfg_hide(
    not(test),
    not(any(test, bootstrap)),
    any(not(feature = "miri-test-libstd"), test, doctest),
    no_global_oom_handling,
    not(no_global_oom_handling),
    not(no_rc),
    not(no_sync),
    target_has_atomic = "ptr"
))]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
#![allow(internal_features)]
#![allow(rustdoc::redundant_explicit_links)]
//
// Library features:
// tidy-alphabetical-start
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(test, feature(is_sorted))]
#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_into_iter_constructors)]
#![feature(array_windows)]
#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
#![feature(const_align_of_val)]
#![feature(const_box)]
#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(deprecated_suggestion)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
#![feature(hint_assert_unchecked)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(non_null_convenience)]
#![feature(panic_internals)]
#![feature(pattern)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
#![feature(set_ptr_value)]
#![feature(sized_type_properties)]
#![feature(slice_from_ptr_range)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
#![feature(tuple_trait)]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(utf8_chunks)]
// tidy-alphabetical-end
//
// Language features:
// tidy-alphabetical-start
#![cfg_attr(not(test), feature(coroutine_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(const_mut_refs)]
#![feature(const_precise_live_drops)]
#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(with_negative_coherence)]
// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
#![feature(doc_cfg_hide)]
// Technically, this is a bug in rustdoc: rustdoc sees the documentation on `#[lang = slice_alloc]`
// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
#![feature(intra_doc_pointers)]

// Allow testing this library
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod testing;

// Module with internal macros used by other modules (needs to be included before other modules).
#[cfg(not(no_macros))]
#[macro_use]
mod macros;

mod raw_vec;

// Heaps provided for low-level allocation strategies

pub mod alloc;

// Primitive types using the heaps above

// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed {
    pub use std::boxed::Box;
}
#[cfg(not(no_borrow))]
pub mod borrow;
pub mod collections;
#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
#[cfg(not(no_fmt))]
pub mod fmt;
#[cfg(not(no_rc))]
pub mod rc;
pub mod slice;
#[cfg(not(no_str))]
pub mod str;
#[cfg(not(no_string))]
pub mod string;
#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
pub mod vec;

#[doc(hidden)]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
pub mod __export {
    pub use core::format_args;
}

#[cfg(test)]
#[allow(dead_code)] // Not used in all configurations
pub(crate) mod test_helpers {
    /// Copied from `std::test_helpers::test_rng`, since these tests rely on the
    /// seed not being the same for every RNG invocation too.
    pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
        use std::hash::{BuildHasher, Hash, Hasher};
        let mut hasher = std::hash::RandomState::new().build_hasher();
        std::panic::Location::caller().hash(&mut hasher);
        let hc64 = hasher.finish();
        let seed_vec =
            hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
        let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
        rand::SeedableRng::from_seed(seed)
    }
}
rust/alloc/raw_vec.rs
@@ -1,610 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]

use core::alloc::LayoutError;
use core::cmp;
use core::hint;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;

#[cfg(test)]
mod tests;

enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    #[allow(dead_code)]
    Zeroed,
}

#[repr(transparent)]
#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
struct Cap(usize);

impl Cap {
    const ZERO: Cap = unsafe { Cap(0) };
}
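The `rustc_layout_scalar_valid_range_end` bound tells the compiler that capacities above `isize::MAX` are impossible bit patterns, handing enum layout extra niches on top of the classic non-null-pointer one. The baseline effect is easy to check from stable code:

```rust
fn main() {
    // The non-null pointer (together with the restricted capacity range) lets
    // the compiler hide `Option`'s discriminant in otherwise-invalid bit
    // patterns, so wrapping a `Vec` costs no extra space.
    assert_eq!(
        std::mem::size_of::<Vec<u8>>(),
        std::mem::size_of::<Option<Vec<u8>>>()
    );
}
```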

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    ptr: Unique<T>,
    /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
    ///
    /// # Safety
    ///
    /// `cap` must be in the `0..=isize::MAX` range.
    cap: Cap,
    alloc: A,
}

impl<T> RawVec<T, Global> {
    /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
    /// they cannot call `Self::new()`.
    ///
    /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
    /// that would truly const-call something unstable.
    pub const NEW: Self = Self::new();

    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
}

impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };
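These minimums are visible through `Vec`: the first push of a one-byte element allocates at least eight slots rather than one. A quick check:

```rust
fn main() {
    let mut bytes: Vec<u8> = Vec::new();
    assert_eq!(bytes.capacity(), 0); // no allocation yet
    bytes.push(1);
    assert!(bytes.capacity() >= 8); // MIN_NON_ZERO_CAP for 1-byte elements

    let mut words: Vec<u64> = Vec::new();
    words.push(1);
    assert!(words.capacity() >= 4); // moderate-sized elements start at 4
}
```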
| 
 | ||||
|     /// Like `new`, but parameterized over the choice of allocator for
 | ||||
|     /// the returned `RawVec`.
 | ||||
|     pub const fn new_in(alloc: A) -> Self { | ||||
|         // `cap: 0` means "unallocated". zero-sized types are ignored.
 | ||||
|         Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc } | ||||
|     } | ||||
| 
 | ||||
|     /// Like `with_capacity`, but parameterized over the choice of
 | ||||
|     /// allocator for the returned `RawVec`.
 | ||||
|     #[cfg(not(no_global_oom_handling))] | ||||
|     #[inline] | ||||
|     pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { | ||||
|         Self::allocate_in(capacity, AllocInit::Uninitialized, alloc) | ||||
|     } | ||||
| 
 | ||||
|     /// Like `try_with_capacity`, but parameterized over the choice of
 | ||||
|     /// allocator for the returned `RawVec`.
 | ||||
|     #[inline] | ||||
|     pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> { | ||||
|         Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc) | ||||
|     } | ||||
| 
 | ||||
|     /// Like `with_capacity_zeroed`, but parameterized over the choice
 | ||||
|     /// of allocator for the returned `RawVec`.
 | ||||
|     #[cfg(not(no_global_oom_handling))] | ||||
|     #[inline] | ||||
|     pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { | ||||
|         Self::allocate_in(capacity, AllocInit::Zeroed, alloc) | ||||
|     } | ||||
| 
 | ||||
|     /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
 | ||||
|     ///
 | ||||
|     /// Note that this will correctly reconstitute any `cap` changes
 | ||||
|     /// that may have been performed. (See description of type for details.)
 | ||||
|     ///
 | ||||
|     /// # Safety
 | ||||
|     ///
 | ||||
|     /// * `len` must be greater than or equal to the most recently requested capacity, and
 | ||||
|     /// * `len` must be less than or equal to `self.capacity()`.
 | ||||
|     ///
 | ||||
|     /// Note, that the requested capacity and `self.capacity()` could differ, as
 | ||||
|     /// an allocator could overallocate and return a greater memory block than requested.
 | ||||
|     pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> { | ||||
|         // Sanity-check one half of the safety requirement (we cannot check the other half).
 | ||||
|         debug_assert!( | ||||
|             len <= self.capacity(), | ||||
|             "`len` must be smaller than or equal to `self.capacity()`" | ||||
|         ); | ||||
| 
 | ||||
|         let me = ManuallyDrop::new(self); | ||||
|         unsafe { | ||||
|             let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len); | ||||
|             Box::from_raw_in(slice, ptr::read(&me.alloc)) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     #[cfg(not(no_global_oom_handling))] | ||||
|     fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { | ||||
|         // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
 | ||||
|         if T::IS_ZST || capacity == 0 { | ||||
|             Self::new_in(alloc) | ||||
|         } else { | ||||
|             // We avoid `unwrap_or_else` here because it bloats the amount of
 | ||||
|             // LLVM IR generated.
 | ||||
|             let layout = match Layout::array::<T>(capacity) { | ||||
|                 Ok(layout) => layout, | ||||
|                 Err(_) => capacity_overflow(), | ||||
|             }; | ||||
|             match alloc_guard(layout.size()) { | ||||
|                 Ok(_) => {} | ||||
|                 Err(_) => capacity_overflow(), | ||||
|             } | ||||
|             let result = match init { | ||||
|                 AllocInit::Uninitialized => alloc.allocate(layout), | ||||
|                 AllocInit::Zeroed => alloc.allocate_zeroed(layout), | ||||
|             }; | ||||
|             let ptr = match result { | ||||
|                 Ok(ptr) => ptr, | ||||
|                 Err(_) => handle_alloc_error(layout), | ||||
|             }; | ||||
| 
 | ||||
|             // Allocators currently return a `NonNull<[u8]>` whose length
 | ||||
|             // matches the size requested. If that ever changes, the capacity
 | ||||
|             // here should change to `ptr.len() / mem::size_of::<T>()`.
 | ||||
|             Self { | ||||
|                 ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, | ||||
|                 cap: unsafe { Cap(capacity) }, | ||||
|                 alloc, | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> { | ||||
|         // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
 | ||||
|         if T::IS_ZST || capacity == 0 { | ||||
|             return Ok(Self::new_in(alloc)); | ||||
|         } | ||||
| 
 | ||||
|         let layout = Layout::array::<T>(capacity).map_err(|_| CapacityOverflow)?; | ||||
|         alloc_guard(layout.size())?; | ||||
|         let result = match init { | ||||
|             AllocInit::Uninitialized => alloc.allocate(layout), | ||||
|             AllocInit::Zeroed => alloc.allocate_zeroed(layout), | ||||
|         }; | ||||
|         let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?; | ||||
| 
 | ||||
|         // Allocators currently return a `NonNull<[u8]>` whose length
 | ||||
|         // matches the size requested. If that ever changes, the capacity
 | ||||
|         // here should change to `ptr.len() / mem::size_of::<T>()`.
 | ||||
|         Ok(Self { | ||||
|             ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, | ||||
|             cap: unsafe { Cap(capacity) }, | ||||
|             alloc, | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
 | ||||
|     ///
 | ||||
|     /// # Safety
 | ||||
|     ///
 | ||||
|     /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
 | ||||
|     /// `capacity`.
 | ||||
|     /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
 | ||||
|     /// systems). For ZSTs capacity is ignored.
 | ||||
|     /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
 | ||||
|     /// guaranteed.
 | ||||
|     #[inline] | ||||
|     pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self { | ||||
|         let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } }; | ||||
|         Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc } | ||||
|     } | ||||

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if T::IS_ZST { usize::MAX } else { self.cap.0 }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if T::IS_ZST || self.cap.0 == 0 {
            None
        } else {
            // We could use `Layout::array` here, which ensures the absence of isize and usize
            // overflows and could hypothetically handle differences between stride and size. But
            // this memory has already been allocated, so we know it can't overflow, and Rust does
            // not currently support such types. So we can do better by skipping some checks and
            // avoiding an unwrap.
            let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
                let layout = Layout::from_size_align_unchecked(size, align);
                Some((self.ptr.cast().into(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient
        // capacity. Therefore, we move all the resizing and error-handling logic from
        // `grow_amortized` and `handle_reserve` behind a call, while making sure that this
        // function is likely to be inlined as just a comparison and a call if the comparison
        // fails.
        #[cold]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }

        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }
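The amortized-growth policy is observable from safe code: capacity jumps geometrically rather than once per push. A small probe (the exact capacity sequence is an implementation detail, not a guarantee):

```rust
fn main() {
    let mut v: Vec<u8> = Vec::new();
    let mut caps = Vec::new();
    for i in 0..64 {
        v.push(i); // growth goes through a reserve-like cold path
        if caps.last() != Some(&v.capacity()) {
            caps.push(v.capacity());
        }
    }
    // Each reallocation at least doubles capacity, so 64 pushes trigger
    // only a handful of reallocations: amortized O(1) per push.
    println!("{caps:?}");
}
```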

    /// A specialized version of `reserve()` used only by the hot and
    /// oft-instantiated `Vec::push()`, which does its own capacity check.
    #[cfg(not(no_global_oom_handling))]
    #[inline(never)]
    pub fn reserve_for_push(&mut self, len: usize) {
        handle_reserve(self.grow_amortized(len, 1));
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)?;
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed.
            hint::assert_unchecked(!self.needs_to_grow(len, additional));
        }
        Ok(())
    }
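At the `Vec` level this fallible path surfaces as `try_reserve`; a sketch of the caller-side pattern it enables (hypothetical helper, not part of this file):

```rust
use std::collections::TryReserveError;

// Reserve up front so the subsequent extend cannot abort on OOM.
fn append_all(dst: &mut Vec<u8>, src: &[u8]) -> Result<(), TryReserveError> {
    dst.try_reserve(src.len())?; // reports failure instead of aborting
    dst.extend_from_slice(src); // guaranteed not to reallocate now
    Ok(())
}

fn main() {
    let mut buf = Vec::new();
    append_all(&mut buf, b"hello").expect("tiny allocation should succeed");
    assert_eq!(buf, b"hello");
}
```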

    /// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
    #[inline(never)]
    pub fn try_reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
        self.grow_amortized(len, 1)
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_exact(len, additional)?;
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed.
            hint::assert_unchecked(!self.needs_to_grow(len, additional));
        }
        Ok(())
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn shrink_to_fit(&mut self, cap: usize) {
        handle_reserve(self.shrink(cap));
    }
}

impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        additional > self.capacity().wrapping_sub(len)
    }
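`wrapping_sub` makes this an overflow-proof form of `len + additional > capacity`: under the invariant `len <= capacity` the subtraction is exact, and no `checked_add` is needed on the hot path. A standalone sketch:

```rust
// Same check as above, written as a free function for illustration.
fn needs_to_grow(capacity: usize, len: usize, additional: usize) -> bool {
    additional > capacity.wrapping_sub(len)
}

fn main() {
    assert!(!needs_to_grow(8, 4, 4)); // 4 + 4 fits exactly
    assert!(needs_to_grow(8, 4, 5)); // 4 + 5 exceeds capacity
    // `len + additional` would overflow usize, but the test itself cannot.
    assert!(needs_to_grow(8, 0, usize::MAX));
}
```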

    /// # Safety:
    ///
    /// `cap` must not exceed `isize::MAX`.
    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / mem::size_of::<T>()`.
        self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = unsafe { Cap(cap) };
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if T::IS_ZST {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap.0 * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);

        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        // SAFETY: `finish_grow` would have resulted in a capacity overflow if we tried to
        // allocate more than `isize::MAX` items.
        unsafe { self.set_ptr_and_cap(ptr, cap) };
        Ok(())
    }
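A worked version of the growth rule above, with a hypothetical `MIN_NON_ZERO_CAP` of 8 (the real constant depends on `size_of::<T>()`):

```rust
// Illustrative only: the amortized growth rule from `grow_amortized`.
fn next_cap(cap: usize, len: usize, additional: usize) -> usize {
    const MIN_NON_ZERO_CAP: usize = 8; // assumed value for this sketch
    let required = len.checked_add(additional).expect("capacity overflow");
    required.max(cap * 2).max(MIN_NON_ZERO_CAP)
}

fn main() {
    assert_eq!(next_cap(0, 0, 1), 8); // first growth jumps to the minimum
    assert_eq!(next_cap(8, 8, 1), 16); // then capacity doubles
    assert_eq!(next_cap(16, 16, 100), 116); // unless the request is larger
}
```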

    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if T::IS_ZST {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        // SAFETY: `finish_grow` would have resulted in a capacity overflow if we tried to
        // allocate more than `isize::MAX` items.
        unsafe {
            self.set_ptr_and_cap(ptr, cap);
        }
        Ok(())
    }

    #[cfg(not(no_global_oom_handling))]
    fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
        assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");

        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
        // See `current_memory()` for why this assert is here.
        let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };

        // If shrinking to 0, deallocate the buffer. We don't reach this point
        // for the T::IS_ZST case since current_memory() will have returned
        // None.
        if cap == 0 {
            unsafe { self.alloc.deallocate(ptr, layout) };
            self.ptr = Unique::dangling();
            self.cap = Cap::ZERO;
        } else {
            let ptr = unsafe {
                // `Layout::array` cannot overflow here because it would have
                // overflowed earlier when capacity was larger.
                let new_size = mem::size_of::<T>().unchecked_mul(cap);
                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
                self.alloc
                    .shrink(ptr, layout, new_layout)
                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
            };
            // SAFETY: if the allocation is valid, then the capacity is too.
            unsafe {
                self.set_ptr_and_cap(ptr, cap);
            }
        }
        Ok(())
    }
}

// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(never)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;

    alloc_guard(new_layout.size())?;

    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality.
            hint::assert_unchecked(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}

unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        if let Some((ptr, layout)) = self.current_memory() {
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}

// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
    match result.map_err(|e| e.kind()) {
        Err(CapacityOverflow) => capacity_overflow(),
        Err(AllocError { layout, .. }) => handle_alloc_error(layout),
        Ok(()) => { /* yay */ }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
    if usize::BITS < 64 && alloc_size > isize::MAX as usize {
        Err(CapacityOverflow.into())
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}
@@ -1,890 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]

use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
#[cfg(not(no_global_oom_handling))]
use core::slice::sort;

use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;

#[cfg(test)]
mod tests;

#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "slice_group_by", since = "1.77.0")]
pub use core::slice::{ChunkBy, ChunkByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};

////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
pub(crate) mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add the inline attribute to this since it is mostly used
    // by the `vec!` macro and doing so causes a perf regression. See #71204
    // for discussion and perf results.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }

    #[cfg(not(no_global_oom_handling))]
    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Clone> ConvertVec for T {
        #[inline]
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                #[inline]
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Copy> ConvertVec for T {
        #[inline]
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialized to `s.len()`
            // by the `copy_to_nonoverlapping` below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}
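Through the public API, `hack::into_vec` is what backs `Box<[T]>::into_vec`: the box's allocation is reinterpreted as a `Vec` whose length equals its capacity, with no copy. A quick check:

```rust
fn main() {
    let b: Box<[i32]> = Box::new([10, 40, 30]);
    let v = b.into_vec();
    // The allocation is reused as-is, so no spare capacity appears.
    assert_eq!(v.len(), v.capacity());
    assert_eq!(v, [10, 40, 30]);
}
```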

#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        stable_sort(self, T::lt);
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        stable_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        stable_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 {
            return;
        }
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }
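Usage-wise, the cached variant pays off when the key is expensive: each key is computed once and the resulting permutation is applied in place by the cycle-chasing loop above. For example:

```rust
fn main() {
    let mut words = vec!["hello", "a", "to", "the"];
    // `len()` is cheap, but an allocating key (e.g. `to_string`) would be
    // computed only once per element here, unlike with `sort_by_key`.
    words.sort_by_cached_key(|w| w.len());
    assert_eq!(words, ["a", "to", "the", "hello"]);
}
```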

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }

    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by copying a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying the
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }
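The same doubling scheme can be written in safe code with `extend_from_within`; a sketch (hypothetical helper, with the same logarithmic number of bulk copies as the original):

```rust
fn repeat_slice<T: Copy>(s: &[T], n: usize) -> Vec<T> {
    if n == 0 || s.is_empty() {
        return Vec::new();
    }
    let capacity = s.len().checked_mul(n).expect("capacity overflow");
    let mut buf = Vec::with_capacity(capacity);
    buf.extend_from_slice(s);
    // Double until the next doubling would overshoot: 2^expn repetitions.
    while buf.len() * 2 <= capacity {
        let len = buf.len();
        buf.extend_from_within(..len);
    }
    // Append the remaining `rem` repetitions from the buffer itself.
    let rem_len = capacity - buf.len();
    buf.extend_from_within(..rem_len);
    buf
}

fn main() {
    assert_eq!(repeat_slice(&[1, 2], 3), [1, 2, 1, 2, 1, 2]);
}
```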

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(since = "1.3.0", note = "renamed to join", suggestion = "join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}

#[cfg(not(test))]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the uppercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }

    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the lowercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }
}
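Both methods above copy first, then case-map in place; non-ASCII bytes pass through untouched:

```rust
fn main() {
    let bytes = b"Rust-2024!";
    assert_eq!(bytes.to_ascii_uppercase(), b"RUST-2024!");
    // Non-ASCII bytes (here the UTF-8 encoding of 'ß') are left unchanged.
    assert_eq!("ß".as_bytes().to_ascii_lowercase(), "ß".as_bytes());
}
```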

////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////

/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///    --> library/alloc/src/slice.rs:608:6
///     |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}

/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
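Both `Join` impls precompute the exact output size (sum of slice lengths plus separators) so the result is built with a single allocation. Usage of the two separator forms:

```rust
fn main() {
    let parts = vec![vec![1u8, 2], vec![3, 4]];
    assert_eq!(parts.concat(), [1, 2, 3, 4]);
    assert_eq!(parts.join(&0), [1, 2, 0, 3, 4]); // Join<&T>
    assert_eq!(parts.join(&[9, 9][..]), [1, 2, 9, 9, 3, 4]); // Join<&[T]>
}
```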

////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

// Specializable trait for implementing ToOwned::clone_into. This is
// public in the crate and has the Allocator parameter so that
// vec::clone_from can use it too.
#[cfg(not(no_global_oom_handling))]
pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
    fn clone_into(&self, target: &mut Vec<T, A>);
}

#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
    default fn clone_into(&self, target: &mut Vec<T, A>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
    fn clone_into(&self, target: &mut Vec<T, A>) {
        target.clear();
        target.extend_from_slice(self);
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;
    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        SpecCloneIntoVec::clone_into(self, target);
    }
}
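The specialization above is what makes `clone_into` cheaper than `to_owned()` into a fresh vector: the target's allocation (and, on the `Clone` path, its existing elements) are reused. For example:

```rust
fn main() {
    let src = vec![String::from("a"), String::from("b")];
    let mut dst: Vec<String> = Vec::with_capacity(16);

    // ToOwned::clone_into overwrites `dst` in place instead of allocating
    // a brand-new Vec; the spare capacity is kept.
    src.as_slice().clone_into(&mut dst);
    assert_eq!(dst, ["a", "b"]);
    assert!(dst.capacity() >= 16);
}
```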

////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

#[inline]
#[cfg(not(no_global_oom_handling))]
fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    if T::IS_ZST {
        // Sorting has no meaningful behavior on zero-sized types. Do nothing.
        return;
    }

    let elem_alloc_fn = |len: usize| -> *mut T {
        // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
        // v.len(). Alloc in general will only be used as a 'shadow region' to store temporary
        // swap elements.
        unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
    };

    let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
        // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
        // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
        // len.
        unsafe {
            alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
        }
    };

    let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
        // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
        // obscene length or 0.
        unsafe {
            alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
                as *mut sort::TimSortRun
        }
    };

    let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
        // SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
        // len.
        unsafe {
            alloc::dealloc(
                buf_ptr as *mut u8,
                alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
            );
        }
    };

    sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}
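A quick demonstration of the stability property this driver provides: equal keys keep their original relative order.

```rust
fn main() {
    let mut v = [(1, 'b'), (0, 'a'), (1, 'a')];
    v.sort_by_key(|&(k, _)| k);
    // The two elements with key 1 stay in their original order.
    assert_eq!(v, [(0, 'a'), (1, 'b'), (1, 'a')]);
}
```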
@@ -1,255 +0,0 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};

use super::Vec;

/// A draining iterator for `Vec<T>`.
///
/// This `struct` is created by [`Vec::drain`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
> {
    /// Index of tail to preserve
    pub(super) tail_start: usize,
    /// Length of tail
    pub(super) tail_len: usize,
    /// Current remaining range to remove
    pub(super) iter: slice::Iter<'a, T>,
    pub(super) vec: NonNull<Vec<T, A>>,
}
| 
 | ||||
| #[stable(feature = "collection_debug", since = "1.17.0")] | ||||
| impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'a, T, A: Allocator> Drain<'a, T, A> { | ||||
|     /// Returns the remaining items of this iterator as a slice.
 | ||||
|     ///
 | ||||
|     /// # Examples
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// let mut vec = vec!['a', 'b', 'c'];
 | ||||
|     /// let mut drain = vec.drain(..);
 | ||||
|     /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
 | ||||
|     /// let _ = drain.next().unwrap();
 | ||||
|     /// assert_eq!(drain.as_slice(), &['b', 'c']);
 | ||||
|     /// ```
 | ||||
|     #[must_use] | ||||
|     #[stable(feature = "vec_drain_as_slice", since = "1.46.0")] | ||||
|     pub fn as_slice(&self) -> &[T] { | ||||
|         self.iter.as_slice() | ||||
|     } | ||||
| 
 | ||||
|     /// Returns a reference to the underlying allocator.
 | ||||
|     #[unstable(feature = "allocator_api", issue = "32838")] | ||||
|     #[must_use] | ||||
|     #[inline] | ||||
|     pub fn allocator(&self) -> &A { | ||||
|         unsafe { self.vec.as_ref().allocator() } | ||||
|     } | ||||
| 
 | ||||
|     /// Keep unyielded elements in the source `Vec`.
 | ||||
|     ///
 | ||||
|     /// # Examples
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// #![feature(drain_keep_rest)]
 | ||||
|     ///
 | ||||
|     /// let mut vec = vec!['a', 'b', 'c'];
 | ||||
|     /// let mut drain = vec.drain(..);
 | ||||
|     ///
 | ||||
|     /// assert_eq!(drain.next().unwrap(), 'a');
 | ||||
|     ///
 | ||||
|     /// // This call keeps 'b' and 'c' in the vec.
 | ||||
|     /// drain.keep_rest();
 | ||||
|     ///
 | ||||
|     /// // If we had not called `keep_rest()`,
 | ||||
|     /// // `vec` would be empty.
 | ||||
|     /// assert_eq!(vec, ['b', 'c']);
 | ||||
|     /// ```
 | ||||
|     #[unstable(feature = "drain_keep_rest", issue = "101122")] | ||||
|     pub fn keep_rest(self) { | ||||
|         // At this moment layout looks like this:
 | ||||
|         //
 | ||||
|         // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
 | ||||
|         //        ^-- start         \_________/-- unyielded_len        \____/-- self.tail_len
 | ||||
|         //                          ^-- unyielded_ptr                  ^-- tail
 | ||||
|         //
 | ||||
|         // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
 | ||||
|         // Here we want to
 | ||||
|         // 1. Move [unyielded] to `start`
 | ||||
|         // 2. Move [tail] to a new start at `start + len(unyielded)`
 | ||||
|         // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
 | ||||
|         //    a. In case of ZST, this is the only thing we want to do
 | ||||
|         // 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
 | ||||
|         let mut this = ManuallyDrop::new(self); | ||||
| 
 | ||||
|         unsafe { | ||||
|             let source_vec = this.vec.as_mut(); | ||||
| 
 | ||||
|             let start = source_vec.len(); | ||||
|             let tail = this.tail_start; | ||||
| 
 | ||||
|             let unyielded_len = this.iter.len(); | ||||
|             let unyielded_ptr = this.iter.as_slice().as_ptr(); | ||||
| 
 | ||||
|             // ZSTs have no identity, so we don't need to move them around.
 | ||||
|             if !T::IS_ZST { | ||||
|                 let start_ptr = source_vec.as_mut_ptr().add(start); | ||||
| 
 | ||||
|                 // memmove back unyielded elements
 | ||||
|                 if unyielded_ptr != start_ptr { | ||||
|                     let src = unyielded_ptr; | ||||
|                     let dst = start_ptr; | ||||
| 
 | ||||
|                     ptr::copy(src, dst, unyielded_len); | ||||
|                 } | ||||
| 
 | ||||
|                 // memmove back untouched tail
 | ||||
|                 if tail != (start + unyielded_len) { | ||||
|                     let src = source_vec.as_ptr().add(tail); | ||||
|                     let dst = start_ptr.add(unyielded_len); | ||||
|                     ptr::copy(src, dst, this.tail_len); | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             source_vec.set_len(start + unyielded_len + this.tail_len); | ||||
|         } | ||||
|     } | ||||
| } | ||||
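| 
 | ||||
| // A hedged sketch of the `ManuallyDrop` idiom `keep_rest` uses above: wrap a value | ||||
| // so its `Drop` impl never runs, then take responsibility for its state by hand. | ||||
| // `Guarded` is a hypothetical type, purely for illustration. | ||||
| #[allow(dead_code)] | ||||
| fn manually_drop_sketch() { | ||||
|     struct Guarded(u32); | ||||
|     impl Drop for Guarded { | ||||
|         fn drop(&mut self) { /* cleanup we want to skip */ } | ||||
|     } | ||||
|     // `Guarded::drop` will not run; we re-establish invariants ourselves. | ||||
|     let this = ManuallyDrop::new(Guarded(7)); | ||||
|     assert_eq!(this.0, 7); | ||||
| } | ||||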
| 
 | ||||
| #[stable(feature = "vec_drain_as_slice", since = "1.46.0")] | ||||
| impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> { | ||||
|     fn as_ref(&self) -> &[T] { | ||||
|         self.as_slice() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {} | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {} | ||||
| 
 | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| impl<T, A: Allocator> Iterator for Drain<'_, T, A> { | ||||
|     type Item = T; | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn next(&mut self) -> Option<T> { | ||||
|         self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) }) | ||||
|     } | ||||
| 
 | ||||
|     fn size_hint(&self) -> (usize, Option<usize>) { | ||||
|         self.iter.size_hint() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> { | ||||
|     #[inline] | ||||
|     fn next_back(&mut self) -> Option<T> { | ||||
|         self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| impl<T, A: Allocator> Drop for Drain<'_, T, A> { | ||||
|     fn drop(&mut self) { | ||||
|         /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
 | ||||
|         struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>); | ||||
| 
 | ||||
|         impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> { | ||||
|             fn drop(&mut self) { | ||||
|                 if self.0.tail_len > 0 { | ||||
|                     unsafe { | ||||
|                         let source_vec = self.0.vec.as_mut(); | ||||
|                         // memmove back untouched tail, update to new length
 | ||||
|                         let start = source_vec.len(); | ||||
|                         let tail = self.0.tail_start; | ||||
|                         if tail != start { | ||||
|                             let src = source_vec.as_ptr().add(tail); | ||||
|                             let dst = source_vec.as_mut_ptr().add(start); | ||||
|                             ptr::copy(src, dst, self.0.tail_len); | ||||
|                         } | ||||
|                         source_vec.set_len(start + self.0.tail_len); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         let iter = mem::take(&mut self.iter); | ||||
|         let drop_len = iter.len(); | ||||
| 
 | ||||
|         let mut vec = self.vec; | ||||
| 
 | ||||
|         if T::IS_ZST { | ||||
|             // ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount.
 | ||||
|             // This can be achieved by manipulating the Vec length instead of moving values out of `iter`.
 | ||||
|             unsafe { | ||||
|                 let vec = vec.as_mut(); | ||||
|                 let old_len = vec.len(); | ||||
|                 vec.set_len(old_len + drop_len + self.tail_len); | ||||
|                 vec.truncate(old_len + self.tail_len); | ||||
|             } | ||||
| 
 | ||||
|             return; | ||||
|         } | ||||
| 
 | ||||
|         // ensure elements are moved back into their appropriate places, even when drop_in_place panics
 | ||||
|         let _guard = DropGuard(self); | ||||
| 
 | ||||
|         if drop_len == 0 { | ||||
|             return; | ||||
|         } | ||||
| 
 | ||||
|         // as_slice() must only be called when iter.len() > 0, because the iterator
 | ||||
|         // also gets touched by vec::Splice, which may turn it into a dangling pointer.
 | ||||
|         // That would make it and the vec pointer point to different allocations,
 | ||||
|         // leading to invalid pointer arithmetic below.
 | ||||
|         let drop_ptr = iter.as_slice().as_ptr(); | ||||
| 
 | ||||
|         unsafe { | ||||
|             // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
 | ||||
|             // a pointer with mutable provenance is necessary. Therefore we must reconstruct
 | ||||
|             // it from the original vec but also avoid creating a &mut to the front since that could
 | ||||
|             // invalidate raw pointers to it which some unsafe code might rely on.
 | ||||
|             let vec_ptr = vec.as_mut().as_mut_ptr(); | ||||
|             let drop_offset = drop_ptr.sub_ptr(vec_ptr); | ||||
|             let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len); | ||||
|             ptr::drop_in_place(to_drop); | ||||
|         } | ||||
|     } | ||||
| } | ||||
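| 
 | ||||
| // A hedged sketch of the drop-guard idiom used in `drop` above: fix-up code placed | ||||
| // in a guard's `Drop` runs on normal exit *and* on unwind. The names here are | ||||
| // illustrative, not from the original source. | ||||
| #[allow(dead_code)] | ||||
| fn drop_guard_sketch(v: &mut Vec<i32>, f: impl FnOnce(&mut Vec<i32>)) { | ||||
|     struct Guard<'a>(&'a mut Vec<i32>); | ||||
|     impl Drop for Guard<'_> { | ||||
|         // Leaves the vec in a known-consistent (empty) state even if `f` panics. | ||||
|         fn drop(&mut self) { self.0.truncate(0); } | ||||
|     } | ||||
|     let mut guard = Guard(v); | ||||
|     f(&mut *guard.0); | ||||
| } | ||||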
| 
 | ||||
| #[stable(feature = "drain", since = "1.6.0")] | ||||
| impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> { | ||||
|     fn is_empty(&self) -> bool { | ||||
|         self.iter.is_empty() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[unstable(feature = "trusted_len", issue = "37572")] | ||||
| unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {} | ||||
| 
 | ||||
| #[stable(feature = "fused", since = "1.26.0")] | ||||
| impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {} | ||||
|  | @ -1,115 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| use crate::alloc::{Allocator, Global}; | ||||
| use core::ptr; | ||||
| use core::slice; | ||||
| 
 | ||||
| use super::Vec; | ||||
| 
 | ||||
| /// An iterator which uses a closure to determine if an element should be removed.
 | ||||
| ///
 | ||||
| /// This struct is created by [`Vec::extract_if`].
 | ||||
| /// See its documentation for more.
 | ||||
| ///
 | ||||
| /// # Example
 | ||||
| ///
 | ||||
| /// ```
 | ||||
| /// #![feature(extract_if)]
 | ||||
| ///
 | ||||
| /// let mut v = vec![0, 1, 2];
 | ||||
| /// let iter: std::vec::ExtractIf<'_, _, _> = v.extract_if(|x| *x % 2 == 0);
 | ||||
| /// ```
 | ||||
| #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")] | ||||
| #[derive(Debug)] | ||||
| #[must_use = "iterators are lazy and do nothing unless consumed"] | ||||
| pub struct ExtractIf< | ||||
|     'a, | ||||
|     T, | ||||
|     F, | ||||
|     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, | ||||
| > where | ||||
|     F: FnMut(&mut T) -> bool, | ||||
| { | ||||
|     pub(super) vec: &'a mut Vec<T, A>, | ||||
|     /// The index of the item that will be inspected by the next call to `next`.
 | ||||
|     pub(super) idx: usize, | ||||
|     /// The number of items that have been drained (removed) thus far.
 | ||||
|     pub(super) del: usize, | ||||
|     /// The original length of `vec` prior to draining.
 | ||||
|     pub(super) old_len: usize, | ||||
|     /// The filter test predicate.
 | ||||
|     pub(super) pred: F, | ||||
| } | ||||
| 
 | ||||
| impl<T, F, A: Allocator> ExtractIf<'_, T, F, A> | ||||
| where | ||||
|     F: FnMut(&mut T) -> bool, | ||||
| { | ||||
|     /// Returns a reference to the underlying allocator.
 | ||||
|     #[unstable(feature = "allocator_api", issue = "32838")] | ||||
|     #[inline] | ||||
|     pub fn allocator(&self) -> &A { | ||||
|         self.vec.allocator() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")] | ||||
| impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A> | ||||
| where | ||||
|     F: FnMut(&mut T) -> bool, | ||||
| { | ||||
|     type Item = T; | ||||
| 
 | ||||
|     fn next(&mut self) -> Option<T> { | ||||
|         unsafe { | ||||
|             while self.idx < self.old_len { | ||||
|                 let i = self.idx; | ||||
|                 let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len); | ||||
|                 let drained = (self.pred)(&mut v[i]); | ||||
|                 // Update the index *after* the predicate is called. If the index
 | ||||
|                 // were updated beforehand and the predicate panicked, the element at
 | ||||
|                 // this index would be leaked.
 | ||||
|                 self.idx += 1; | ||||
|                 if drained { | ||||
|                     self.del += 1; | ||||
|                     return Some(ptr::read(&v[i])); | ||||
|                 } else if self.del > 0 { | ||||
|                     let del = self.del; | ||||
|                     let src: *const T = &v[i]; | ||||
|                     let dst: *mut T = &mut v[i - del]; | ||||
|                     ptr::copy_nonoverlapping(src, dst, 1); | ||||
|                 } | ||||
|             } | ||||
|             None | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn size_hint(&self) -> (usize, Option<usize>) { | ||||
|         (0, Some(self.old_len - self.idx)) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")] | ||||
| impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A> | ||||
| where | ||||
|     F: FnMut(&mut T) -> bool, | ||||
| { | ||||
|     fn drop(&mut self) { | ||||
|         unsafe { | ||||
|             if self.idx < self.old_len && self.del > 0 { | ||||
|                 // This is a pretty messed up state, and there isn't really an
 | ||||
|                 // obviously right thing to do. We don't want to keep trying
 | ||||
|                 // to execute `pred`, so we just backshift all the unprocessed
 | ||||
|                 // elements and tell the vec that they still exist. The backshift
 | ||||
|                 // is required to prevent a double-drop of the last successfully
 | ||||
|                 // drained item prior to a panic in the predicate.
 | ||||
|                 let ptr = self.vec.as_mut_ptr(); | ||||
|                 let src = ptr.add(self.idx); | ||||
|                 let dst = src.sub(self.del); | ||||
|                 let tail_len = self.old_len - self.idx; | ||||
|                 src.copy_to(dst, tail_len); | ||||
|             } | ||||
|             self.vec.set_len(self.old_len - self.del); | ||||
|         } | ||||
|     } | ||||
| } | ||||
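| 
 | ||||
| // A safe illustrative sketch of the same backshift compaction `next` performs with | ||||
| // raw pointers: kept elements slide left over the holes left by drained ones, and | ||||
| // the vector is truncated to the kept count at the end. | ||||
| #[allow(dead_code)] | ||||
| fn extract_evens(v: &mut Vec<i32>) -> Vec<i32> { | ||||
|     let mut out = Vec::new(); | ||||
|     let mut write = 0; | ||||
|     for read in 0..v.len() { | ||||
|         if v[read] % 2 == 0 { | ||||
|             out.push(v[read]); // "drained" element | ||||
|         } else { | ||||
|             v.swap(write, read); // backshift a kept element | ||||
|             write += 1; | ||||
|         } | ||||
|     } | ||||
|     v.truncate(write); | ||||
|     out | ||||
| } | ||||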
|  | @ -1,484 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| use super::AsVecIntoIter; | ||||
| use crate::alloc::{Allocator, Global}; | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| use crate::collections::VecDeque; | ||||
| use crate::raw_vec::RawVec; | ||||
| use core::array; | ||||
| use core::fmt; | ||||
| use core::iter::{ | ||||
|     FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen, | ||||
|     TrustedRandomAccessNoCoerce, | ||||
| }; | ||||
| use core::marker::PhantomData; | ||||
| use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; | ||||
| use core::num::NonZeroUsize; | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| use core::ops::Deref; | ||||
| use core::ptr::{self, NonNull}; | ||||
| use core::slice::{self}; | ||||
| 
 | ||||
| macro non_null { | ||||
|     (mut $place:expr, $t:ident) => {{ | ||||
|         #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
 | ||||
|         unsafe { &mut *(ptr::addr_of_mut!($place) as *mut NonNull<$t>) } | ||||
|     }}, | ||||
|     ($place:expr, $t:ident) => {{ | ||||
|         #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
 | ||||
|         unsafe { *(ptr::addr_of!($place) as *const NonNull<$t>) } | ||||
|     }}, | ||||
| } | ||||
| 
 | ||||
| /// An iterator that moves out of a vector.
 | ||||
| ///
 | ||||
| /// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
 | ||||
| /// (provided by the [`IntoIterator`] trait).
 | ||||
| ///
 | ||||
| /// # Example
 | ||||
| ///
 | ||||
| /// ```
 | ||||
| /// let v = vec![0, 1, 2];
 | ||||
| /// let iter: std::vec::IntoIter<_> = v.into_iter();
 | ||||
| /// ```
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| #[rustc_insignificant_dtor] | ||||
| pub struct IntoIter< | ||||
|     T, | ||||
|     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, | ||||
| > { | ||||
|     pub(super) buf: NonNull<T>, | ||||
|     pub(super) phantom: PhantomData<T>, | ||||
|     pub(super) cap: usize, | ||||
|     // the drop impl reconstructs a RawVec from buf, cap and alloc
 | ||||
|     // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
 | ||||
|     pub(super) alloc: ManuallyDrop<A>, | ||||
|     pub(super) ptr: NonNull<T>, | ||||
|     /// If T is a ZST, this is actually ptr+len. This encoding is picked so that
 | ||||
|     /// ptr == end is a quick test for the Iterator being empty, that works
 | ||||
|     /// for both ZST and non-ZST.
 | ||||
|     /// For non-ZSTs the pointer is treated as `NonNull<T>`
 | ||||
|     pub(super) end: *const T, | ||||
| } | ||||
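| 
 | ||||
| // A hedged sketch of the two length encodings documented above: for sized `T` the | ||||
| // remaining length is the element distance between `ptr` and `end`; for ZSTs, | ||||
| // `end` is really `ptr + len` in bytes, i.e. it encodes the count itself. | ||||
| // SAFETY contract (assumed): both pointers derive from the same allocation and | ||||
| // `ptr <= end`. | ||||
| #[allow(dead_code)] | ||||
| unsafe fn remaining_len_sketch<T>(ptr: *const T, end: *const T) -> usize { | ||||
|     if T::IS_ZST { | ||||
|         (end as usize).wrapping_sub(ptr as usize) | ||||
|     } else { | ||||
|         // SAFETY: same allocation and ordering per this function's contract. | ||||
|         unsafe { end.offset_from(ptr) as usize } | ||||
|     } | ||||
| } | ||||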
| 
 | ||||
| #[stable(feature = "vec_intoiter_debug", since = "1.13.0")] | ||||
| impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         f.debug_tuple("IntoIter").field(&self.as_slice()).finish() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T, A: Allocator> IntoIter<T, A> { | ||||
|     /// Returns the remaining items of this iterator as a slice.
 | ||||
|     ///
 | ||||
|     /// # Examples
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// let vec = vec!['a', 'b', 'c'];
 | ||||
|     /// let mut into_iter = vec.into_iter();
 | ||||
|     /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
 | ||||
|     /// let _ = into_iter.next().unwrap();
 | ||||
|     /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
 | ||||
|     /// ```
 | ||||
|     #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] | ||||
|     pub fn as_slice(&self) -> &[T] { | ||||
|         unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len()) } | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the remaining items of this iterator as a mutable slice.
 | ||||
|     ///
 | ||||
|     /// # Examples
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// let vec = vec!['a', 'b', 'c'];
 | ||||
|     /// let mut into_iter = vec.into_iter();
 | ||||
|     /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
 | ||||
|     /// into_iter.as_mut_slice()[2] = 'z';
 | ||||
|     /// assert_eq!(into_iter.next().unwrap(), 'a');
 | ||||
|     /// assert_eq!(into_iter.next().unwrap(), 'b');
 | ||||
|     /// assert_eq!(into_iter.next().unwrap(), 'z');
 | ||||
|     /// ```
 | ||||
|     #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] | ||||
|     pub fn as_mut_slice(&mut self) -> &mut [T] { | ||||
|         unsafe { &mut *self.as_raw_mut_slice() } | ||||
|     } | ||||
| 
 | ||||
|     /// Returns a reference to the underlying allocator.
 | ||||
|     #[unstable(feature = "allocator_api", issue = "32838")] | ||||
|     #[inline] | ||||
|     pub fn allocator(&self) -> &A { | ||||
|         &self.alloc | ||||
|     } | ||||
| 
 | ||||
|     fn as_raw_mut_slice(&mut self) -> *mut [T] { | ||||
|         ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), self.len()) | ||||
|     } | ||||
| 
 | ||||
|     /// Drops remaining elements and relinquishes the backing allocation.
 | ||||
|     /// This method guarantees it won't panic before relinquishing
 | ||||
|     /// the backing allocation.
 | ||||
|     ///
 | ||||
|     /// This is roughly equivalent to the following, but more efficient
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
 | ||||
|     /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
 | ||||
|     /// (&mut into_iter).for_each(drop);
 | ||||
|     /// std::mem::forget(into_iter);
 | ||||
|     /// ```
 | ||||
|     ///
 | ||||
|     /// This method is used by in-place iteration, refer to the vec::in_place_collect
 | ||||
|     /// documentation for an overview.
 | ||||
|     #[cfg(not(no_global_oom_handling))] | ||||
|     pub(super) fn forget_allocation_drop_remaining(&mut self) { | ||||
|         let remaining = self.as_raw_mut_slice(); | ||||
| 
 | ||||
|         // Overwrite the individual fields instead of creating a new
 | ||||
|         // struct and then overwriting &mut self;
 | ||||
|         // this generates less assembly.
 | ||||
|         self.cap = 0; | ||||
|         self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; | ||||
|         self.ptr = self.buf; | ||||
|         self.end = self.buf.as_ptr(); | ||||
| 
 | ||||
|         // Dropping the remaining elements can panic, so this needs to be
 | ||||
|         // done only after updating the other fields.
 | ||||
|         unsafe { | ||||
|             ptr::drop_in_place(remaining); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Forgets the remaining elements without dropping them, while still allowing the backing allocation to be freed.
 | ||||
|     pub(crate) fn forget_remaining_elements(&mut self) { | ||||
|         // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
 | ||||
|         // `ptr` must stay aligned, while `end` may be unaligned.
 | ||||
|         self.end = self.ptr.as_ptr(); | ||||
|     } | ||||
| 
 | ||||
|     #[cfg(not(no_global_oom_handling))] | ||||
|     #[inline] | ||||
|     pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> { | ||||
|         // Keep our `Drop` impl from dropping the elements and the allocator
 | ||||
|         let mut this = ManuallyDrop::new(self); | ||||
| 
 | ||||
|         // SAFETY: This allocation originally came from a `Vec`, so it passes
 | ||||
|         // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
 | ||||
|         // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
 | ||||
|         // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
 | ||||
|         // Taking `alloc` is ok because nothing else is going to look at it,
 | ||||
|         // since our `Drop` impl isn't going to run so there's no more code.
 | ||||
|         unsafe { | ||||
|             let buf = this.buf.as_ptr(); | ||||
|             let initialized = if T::IS_ZST { | ||||
|                 // All the pointers are the same for ZSTs, so it's fine to
 | ||||
|                 // say that they're all at the beginning of the "allocation".
 | ||||
|                 0..this.len() | ||||
|             } else { | ||||
|                 this.ptr.sub_ptr(this.buf)..this.end.sub_ptr(buf) | ||||
|             }; | ||||
|             let cap = this.cap; | ||||
|             let alloc = ManuallyDrop::take(&mut this.alloc); | ||||
|             VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc) | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")] | ||||
| impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> { | ||||
|     fn as_ref(&self) -> &[T] { | ||||
|         self.as_slice() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {} | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {} | ||||
| 
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| impl<T, A: Allocator> Iterator for IntoIter<T, A> { | ||||
|     type Item = T; | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn next(&mut self) -> Option<T> { | ||||
|         if T::IS_ZST { | ||||
|             if self.ptr.as_ptr() == self.end as *mut _ { | ||||
|                 None | ||||
|             } else { | ||||
|                 // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
 | ||||
|                 // reducing the `end`.
 | ||||
|                 self.end = self.end.wrapping_byte_sub(1); | ||||
| 
 | ||||
|                 // Make up a value of this ZST.
 | ||||
|                 Some(unsafe { mem::zeroed() }) | ||||
|             } | ||||
|         } else { | ||||
|             if self.ptr == non_null!(self.end, T) { | ||||
|                 None | ||||
|             } else { | ||||
|                 let old = self.ptr; | ||||
|                 self.ptr = unsafe { old.add(1) }; | ||||
| 
 | ||||
|                 Some(unsafe { ptr::read(old.as_ptr()) }) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn size_hint(&self) -> (usize, Option<usize>) { | ||||
|         let exact = if T::IS_ZST { | ||||
|             self.end.addr().wrapping_sub(self.ptr.as_ptr().addr()) | ||||
|         } else { | ||||
|             unsafe { non_null!(self.end, T).sub_ptr(self.ptr) } | ||||
|         }; | ||||
|         (exact, Some(exact)) | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> { | ||||
|         let step_size = self.len().min(n); | ||||
|         let to_drop = ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), step_size); | ||||
|         if T::IS_ZST { | ||||
|             // See `next` for why we sub `end` here.
 | ||||
|             self.end = self.end.wrapping_byte_sub(step_size); | ||||
|         } else { | ||||
|             // SAFETY: the min() above ensures that step_size is in bounds
 | ||||
|             self.ptr = unsafe { self.ptr.add(step_size) }; | ||||
|         } | ||||
|         // SAFETY: the min() above ensures that step_size is in bounds
 | ||||
|         unsafe { | ||||
|             ptr::drop_in_place(to_drop); | ||||
|         } | ||||
|         NonZeroUsize::new(n - step_size).map_or(Ok(()), Err) | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn count(self) -> usize { | ||||
|         self.len() | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> { | ||||
|         let mut raw_ary = MaybeUninit::uninit_array(); | ||||
| 
 | ||||
|         let len = self.len(); | ||||
| 
 | ||||
|         if T::IS_ZST { | ||||
|             if len < N { | ||||
|                 self.forget_remaining_elements(); | ||||
|                 // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
 | ||||
|                 return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) }); | ||||
|             } | ||||
| 
 | ||||
|             self.end = self.end.wrapping_byte_sub(N); | ||||
|             // Safety: ditto
 | ||||
|             return Ok(unsafe { raw_ary.transpose().assume_init() }); | ||||
|         } | ||||
| 
 | ||||
|         if len < N { | ||||
|             // Safety: `len` indicates that this many elements are available and we just checked that
 | ||||
|             // it fits into the array.
 | ||||
|             unsafe { | ||||
|                 ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, len); | ||||
|                 self.forget_remaining_elements(); | ||||
|                 return Err(array::IntoIter::new_unchecked(raw_ary, 0..len)); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         // Safety: `len` is at least the array size `N`. Copy a fixed amount here to fully initialize
 | ||||
|         // the array.
 | ||||
|         return unsafe { | ||||
|             ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, N); | ||||
|             self.ptr = self.ptr.add(N); | ||||
|             Ok(raw_ary.transpose().assume_init()) | ||||
|         }; | ||||
|     } | ||||
| 
 | ||||
|     unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item | ||||
|     where | ||||
|         Self: TrustedRandomAccessNoCoerce, | ||||
|     { | ||||
|         // SAFETY: the caller must guarantee that `i` is in bounds of the
 | ||||
|         // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
 | ||||
|         // is guaranteed to point to an element of the `Vec<T>` and
 | ||||
|         // thus guaranteed to be valid to dereference.
 | ||||
|         //
 | ||||
|         // Also note the implementation of `Self: TrustedRandomAccess` requires
 | ||||
|         // that `T: Copy` so reading elements from the buffer doesn't invalidate
 | ||||
|         // them for `Drop`.
 | ||||
|         unsafe { if T::IS_ZST { mem::zeroed() } else { self.ptr.add(i).read() } } | ||||
|     } | ||||
| } | ||||
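| 
 | ||||
| // A usage sketch of the ZST fast path above: values of `()` carry no data, so | ||||
| // iteration is observable only through the remaining length. | ||||
| #[allow(dead_code)] | ||||
| fn zst_iteration_sketch() { | ||||
|     let mut v = super::Vec::new(); | ||||
|     for _ in 0..3 { | ||||
|         v.push(()); | ||||
|     } | ||||
|     let mut it = v.into_iter(); | ||||
|     assert_eq!(it.len(), 3); | ||||
|     assert_eq!(it.next(), Some(())); | ||||
|     assert_eq!(it.len(), 2); // `end` was byte-decremented; `ptr` stayed put | ||||
| } | ||||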
| 
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { | ||||
|     #[inline] | ||||
|     fn next_back(&mut self) -> Option<T> { | ||||
|         if T::IS_ZST { | ||||
|             if self.end as *mut _ == self.ptr.as_ptr() { | ||||
|                 None | ||||
|             } else { | ||||
|                 // See above for why 'ptr.offset' isn't used
 | ||||
|                 self.end = self.end.wrapping_byte_sub(1); | ||||
| 
 | ||||
|                 // Make up a value of this ZST.
 | ||||
|                 Some(unsafe { mem::zeroed() }) | ||||
|             } | ||||
|         } else { | ||||
|             if non_null!(self.end, T) == self.ptr { | ||||
|                 None | ||||
|             } else { | ||||
|                 let new_end = unsafe { non_null!(self.end, T).sub(1) }; | ||||
|                 *non_null!(mut self.end, T) = new_end; | ||||
| 
 | ||||
|                 Some(unsafe { ptr::read(new_end.as_ptr()) }) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> { | ||||
|         let step_size = self.len().min(n); | ||||
|         if T::IS_ZST { | ||||
|             // SAFETY: same as for advance_by()
 | ||||
|             self.end = self.end.wrapping_byte_sub(step_size); | ||||
|         } else { | ||||
|             // SAFETY: same as for advance_by()
 | ||||
|             self.end = unsafe { self.end.sub(step_size) }; | ||||
|         } | ||||
|         let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size); | ||||
|         // SAFETY: same as for advance_by()
 | ||||
|         unsafe { | ||||
|             ptr::drop_in_place(to_drop); | ||||
|         } | ||||
|         NonZeroUsize::new(n - step_size).map_or(Ok(()), Err) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { | ||||
|     fn is_empty(&self) -> bool { | ||||
|         if T::IS_ZST { | ||||
|             self.ptr.as_ptr() == self.end as *mut _ | ||||
|         } else { | ||||
|             self.ptr == non_null!(self.end, T) | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "fused", since = "1.26.0")] | ||||
| impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {} | ||||
| 
 | ||||
| #[doc(hidden)] | ||||
| #[unstable(issue = "none", feature = "trusted_fused")] | ||||
| unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {} | ||||
| 
 | ||||
| #[unstable(feature = "trusted_len", issue = "37572")] | ||||
| unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {} | ||||
| 
 | ||||
| #[stable(feature = "default_iters", since = "1.70.0")] | ||||
| impl<T, A> Default for IntoIter<T, A> | ||||
| where | ||||
|     A: Allocator + Default, | ||||
| { | ||||
|     /// Creates an empty `vec::IntoIter`.
 | ||||
|     ///
 | ||||
|     /// ```
 | ||||
|     /// # use std::vec;
 | ||||
|     /// let iter: vec::IntoIter<u8> = Default::default();
 | ||||
|     /// assert_eq!(iter.len(), 0);
 | ||||
|     /// assert_eq!(iter.as_slice(), &[]);
 | ||||
|     /// ```
 | ||||
|     fn default() -> Self { | ||||
|         super::Vec::new_in(Default::default()).into_iter() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[doc(hidden)] | ||||
| #[unstable(issue = "none", feature = "std_internals")] | ||||
| #[rustc_unsafe_specialization_marker] | ||||
| pub trait NonDrop {} | ||||
| 
 | ||||
| // `T: Copy` is used as an approximation for `!Drop`, since get_unchecked does not advance self.ptr
 | ||||
| // and thus we can't implement drop handling.
 | ||||
| #[unstable(issue = "none", feature = "std_internals")] | ||||
| impl<T: Copy> NonDrop for T {} | ||||
| 
 | ||||
| #[doc(hidden)] | ||||
| #[unstable(issue = "none", feature = "std_internals")] | ||||
| // TrustedRandomAccess (without NoCoerce) must not be implemented because
 | ||||
| // subtypes/supertypes of `T` might not be `NonDrop`
 | ||||
| unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A> | ||||
| where | ||||
|     T: NonDrop, | ||||
| { | ||||
|     const MAY_HAVE_SIDE_EFFECT: bool = false; | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] | ||||
| impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> { | ||||
|     #[cfg(not(test))] | ||||
|     fn clone(&self) -> Self { | ||||
|         self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter() | ||||
|     } | ||||
|     #[cfg(test)] | ||||
|     fn clone(&self) -> Self { | ||||
|         crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[stable(feature = "rust1", since = "1.0.0")] | ||||
| unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> { | ||||
|     fn drop(&mut self) { | ||||
|         struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>); | ||||
| 
 | ||||
|         impl<T, A: Allocator> Drop for DropGuard<'_, T, A> { | ||||
|             fn drop(&mut self) { | ||||
|                 unsafe { | ||||
|                     // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
 | ||||
|                     let alloc = ManuallyDrop::take(&mut self.0.alloc); | ||||
|                     // RawVec handles deallocation
 | ||||
|                     let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         let guard = DropGuard(self); | ||||
|         // destroy the remaining elements
 | ||||
|         unsafe { | ||||
|             ptr::drop_in_place(guard.0.as_raw_mut_slice()); | ||||
|         } | ||||
|         // now `guard` will be dropped and do the rest
 | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // In addition to the SAFETY invariants of the following three unsafe traits
 | ||||
| // also refer to the vec::in_place_collect module documentation to get an overview
 | ||||
| #[unstable(issue = "none", feature = "inplace_iteration")] | ||||
| #[doc(hidden)] | ||||
| unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> { | ||||
|     const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1); | ||||
|     const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1); | ||||
| } | ||||
| 
 | ||||
| #[unstable(issue = "none", feature = "inplace_iteration")] | ||||
| #[doc(hidden)] | ||||
| unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> { | ||||
|     type Source = Self; | ||||
| 
 | ||||
|     #[inline] | ||||
|     unsafe fn as_inner(&mut self) -> &mut Self::Source { | ||||
|         self | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| unsafe impl<T> AsVecIntoIter for IntoIter<T> { | ||||
|     type Item = T; | ||||
| 
 | ||||
|     fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> { | ||||
|         self | ||||
|     } | ||||
| } | ||||
|  | @ -1,204 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| use core::num::{Saturating, Wrapping}; | ||||
| 
 | ||||
| use crate::boxed::Box; | ||||
| 
 | ||||
| #[rustc_specialization_trait] | ||||
| pub(super) unsafe trait IsZero { | ||||
|     /// Whether this value's representation is all zeros,
 | ||||
|     /// or can be represented with all zeros.
 | ||||
|     fn is_zero(&self) -> bool; | ||||
| } | ||||
| 
 | ||||
| macro_rules! impl_is_zero { | ||||
|     ($t:ty, $is_zero:expr) => { | ||||
|         unsafe impl IsZero for $t { | ||||
|             #[inline] | ||||
|             fn is_zero(&self) -> bool { | ||||
|                 $is_zero(*self) | ||||
|             } | ||||
|         } | ||||
|     }; | ||||
| } | ||||
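| 
 | ||||
| // For illustration, `impl_is_zero!(i32, |x| x == 0)` below expands to roughly | ||||
| // the following (the closure call compiles down to a plain comparison): | ||||
| // | ||||
| //     unsafe impl IsZero for i32 { | ||||
| //         #[inline] | ||||
| //         fn is_zero(&self) -> bool { | ||||
| //             (|x| x == 0)(*self) | ||||
| //         } | ||||
| //     } | ||||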
| 
 | ||||
| impl_is_zero!(i8, |x| x == 0); // It is needed to impl for arrays and tuples of i8.
 | ||||
| impl_is_zero!(i16, |x| x == 0); | ||||
| impl_is_zero!(i32, |x| x == 0); | ||||
| impl_is_zero!(i64, |x| x == 0); | ||||
| impl_is_zero!(i128, |x| x == 0); | ||||
| impl_is_zero!(isize, |x| x == 0); | ||||
| 
 | ||||
| impl_is_zero!(u8, |x| x == 0); // It is needed to impl for arrays and tuples of u8.
 | ||||
| impl_is_zero!(u16, |x| x == 0); | ||||
| impl_is_zero!(u32, |x| x == 0); | ||||
| impl_is_zero!(u64, |x| x == 0); | ||||
| impl_is_zero!(u128, |x| x == 0); | ||||
| impl_is_zero!(usize, |x| x == 0); | ||||
| 
 | ||||
| impl_is_zero!(bool, |x| x == false); | ||||
| impl_is_zero!(char, |x| x == '\0'); | ||||
| 
 | ||||
| impl_is_zero!(f32, |x: f32| x.to_bits() == 0); | ||||
| impl_is_zero!(f64, |x: f64| x.to_bits() == 0); | ||||
| 
 | ||||
| unsafe impl<T> IsZero for *const T { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         (*self).is_null() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| unsafe impl<T> IsZero for *mut T { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         (*self).is_null() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         // Because this is generated as a runtime check, it's not obvious that
 | ||||
|         // it's worth doing if the array is really long. The threshold here
 | ||||
|         // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
 | ||||
|         // fails to const-fold the check in `vec![[1; 32]; n]`
 | ||||
|         // See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
 | ||||
|         // Feel free to tweak if you have better evidence.
 | ||||
| 
 | ||||
|         N <= 16 && self.iter().all(IsZero::is_zero) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // This is a recursive macro.
 | ||||
| macro_rules! impl_for_tuples { | ||||
|     // Stopper
 | ||||
|     () => { | ||||
|         // There is no point implementing this for the empty tuple, since it is a ZST.
 | ||||
|     }; | ||||
|     ($first_arg:ident $(,$rest:ident)*) => { | ||||
|         unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){ | ||||
|             #[inline] | ||||
|             fn is_zero(&self) -> bool{ | ||||
|                 // Destructure the tuple into N references.
 | ||||
|                 // Rust allows shadowing generic params with local variable names.
 | ||||
|                 #[allow(non_snake_case)] | ||||
|                 let ($first_arg, $($rest,)*) = self; | ||||
| 
 | ||||
|                 $first_arg.is_zero() | ||||
|                     $( && $rest.is_zero() )* | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         impl_for_tuples!($($rest),*); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl_for_tuples!(A, B, C, D, E, F, G, H); | ||||
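| 
 | ||||
| // For illustration, the last non-stopper step of the recursion above yields | ||||
| // roughly this impl for the 1-tuple (the generic param name doubles as the | ||||
| // binding name, which is why the macro allows non_snake_case): | ||||
| // | ||||
| //     unsafe impl<H: IsZero> IsZero for (H,) { | ||||
| //         #[inline] | ||||
| //         fn is_zero(&self) -> bool { | ||||
| //             let (H,) = self; | ||||
| //             H.is_zero() | ||||
| //         } | ||||
| //     } | ||||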
| 
 | ||||
| // `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
 | ||||
| // For fat pointers, the bytes that would be the pointer metadata in the `Some`
 | ||||
| // variant are padding in the `None` variant, so ignoring them and
 | ||||
| // zero-initializing instead is ok.
 | ||||
| // `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
 | ||||
| // `SpecFromElem`.
 | ||||
| 
 | ||||
| unsafe impl<T: ?Sized> IsZero for Option<&T> { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         self.is_none() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| unsafe impl<T: ?Sized> IsZero for Option<Box<T>> { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         self.is_none() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // `Option<num::NonZeroU32>` and similar have a representation guarantee that
 | ||||
| // they're the same size as the corresponding `u32` type, as well as a guarantee
 | ||||
| // that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
 | ||||
| // While the documentation officially makes it UB to transmute from `None`,
 | ||||
| // we're the standard library so we can make extra inferences, and we know that
 | ||||
| // the only niche available to represent `None` is the one that's all zeros.
 | ||||
| 
 | ||||
| macro_rules! impl_is_zero_option_of_nonzero { | ||||
|     ($($t:ident,)+) => {$( | ||||
|         unsafe impl IsZero for Option<core::num::$t> { | ||||
|             #[inline] | ||||
|             fn is_zero(&self) -> bool { | ||||
|                 self.is_none() | ||||
|             } | ||||
|         } | ||||
|     )+}; | ||||
| } | ||||
| 
 | ||||
| impl_is_zero_option_of_nonzero!( | ||||
|     NonZeroU8, | ||||
|     NonZeroU16, | ||||
|     NonZeroU32, | ||||
|     NonZeroU64, | ||||
|     NonZeroU128, | ||||
|     NonZeroI8, | ||||
|     NonZeroI16, | ||||
|     NonZeroI32, | ||||
|     NonZeroI64, | ||||
|     NonZeroI128, | ||||
|     NonZeroUsize, | ||||
|     NonZeroIsize, | ||||
| ); | ||||
| 
 | ||||
| macro_rules! impl_is_zero_option_of_num { | ||||
|     ($($t:ty,)+) => {$( | ||||
|         unsafe impl IsZero for Option<$t> { | ||||
|             #[inline] | ||||
|             fn is_zero(&self) -> bool { | ||||
|                 const { | ||||
|                     let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() }; | ||||
|                     assert!(none.is_none()); | ||||
|                 } | ||||
|                 self.is_none() | ||||
|             } | ||||
|         } | ||||
|     )+}; | ||||
| } | ||||
| 
 | ||||
| impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,); | ||||
| 
 | ||||
| unsafe impl<T: IsZero> IsZero for Wrapping<T> { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         self.0.is_zero() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| unsafe impl<T: IsZero> IsZero for Saturating<T> { | ||||
|     #[inline] | ||||
|     fn is_zero(&self) -> bool { | ||||
|         self.0.is_zero() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| macro_rules! impl_for_optional_bool { | ||||
|     ($($t:ty,)+) => {$( | ||||
|         unsafe impl IsZero for $t { | ||||
|             #[inline] | ||||
|             fn is_zero(&self) -> bool { | ||||
|                 // SAFETY: This is *not* a stable layout guarantee, but
 | ||||
|                 // inside `core` we're allowed to rely on the current rustc
 | ||||
|                 // behaviour that options of bools will be one byte with
 | ||||
|                 // no padding, so long as they're nested less than 254 deep.
 | ||||
|                 let raw: u8 = unsafe { core::mem::transmute(*self) }; | ||||
|                 raw == 0 | ||||
|             } | ||||
|         } | ||||
|     )+}; | ||||
| } | ||||
| impl_for_optional_bool! { | ||||
|     Option<bool>, | ||||
|     Option<Option<bool>>, | ||||
|     Option<Option<Option<bool>>>, | ||||
|     // Could go further, but not worth the metadata overhead
 | ||||
| } | ||||
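| 
 | ||||
| // The layout facts the transmute above relies on can be spot-checked; this is | ||||
| // an illustrative sketch, not part of the original file: | ||||
| #[allow(dead_code)] | ||||
| fn optional_bool_layout_sketch() { | ||||
|     use core::mem::size_of; | ||||
|     assert_eq!(size_of::<Option<bool>>(), 1); | ||||
|     assert_eq!(size_of::<Option<Option<bool>>>(), 1); | ||||
|     assert_eq!(size_of::<Option<Option<Option<bool>>>>(), 1); | ||||
| } | ||||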
										
											
File diff suppressed because it is too large
							|  | @ -1,49 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| use crate::alloc::Allocator; | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| use crate::borrow::Cow; | ||||
| 
 | ||||
| use super::Vec; | ||||
| 
 | ||||
| macro_rules! __impl_slice_eq1 { | ||||
|     ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => { | ||||
|         #[$stability] | ||||
|         impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs | ||||
|         where | ||||
|             T: PartialEq<U>, | ||||
|             $($ty: $bound)? | ||||
|         { | ||||
|             #[inline] | ||||
|             fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] } | ||||
|             #[inline] | ||||
|             fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| __impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")]  } | ||||
| __impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")]  } | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| __impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| __impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| __impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| __impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] } | ||||
| 
 | ||||
| // NOTE: some less important impls are omitted to reduce code bloat
 | ||||
| // FIXME(Centril): Reconsider this?
 | ||||
| //__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
 | ||||
| //__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
 | ||||
| //__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
 | ||||
| //__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
 | ||||
| //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
 | ||||
| //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
 | ||||
| //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
 | ||||
|  | @ -1,35 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| // Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
 | ||||
| //
 | ||||
| // The idea is that the length is tracked in a local variable in SetLenOnDrop
 | ||||
| // that the optimizer can see does not alias with any stores through the Vec's data
 | ||||
| // pointer. This is a workaround for alias analysis issue #32155.
 | ||||
| pub(super) struct SetLenOnDrop<'a> { | ||||
|     len: &'a mut usize, | ||||
|     local_len: usize, | ||||
| } | ||||
| 
 | ||||
| impl<'a> SetLenOnDrop<'a> { | ||||
|     #[inline] | ||||
|     pub(super) fn new(len: &'a mut usize) -> Self { | ||||
|         SetLenOnDrop { local_len: *len, len } | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     pub(super) fn increment_len(&mut self, increment: usize) { | ||||
|         self.local_len += increment; | ||||
|     } | ||||
| 
 | ||||
|     #[inline] | ||||
|     pub(super) fn current_len(&self) -> usize { | ||||
|         self.local_len | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Drop for SetLenOnDrop<'_> { | ||||
|     #[inline] | ||||
|     fn drop(&mut self) { | ||||
|         *self.len = self.local_len; | ||||
|     } | ||||
| } | ||||
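| 
 | ||||
| // A usage sketch (illustrative; a plain local stands in for the Vec's private | ||||
| // `len` field): increments go to the local copy, and the borrowed length is | ||||
| // written back exactly once, in `drop`, even if a loop body panics early. | ||||
| #[allow(dead_code)] | ||||
| fn set_len_on_drop_sketch() { | ||||
|     let mut len = 0usize; | ||||
|     { | ||||
|         let mut guard = SetLenOnDrop::new(&mut len); | ||||
|         for _ in 0..3 { | ||||
|             // ...write one element at `current_len()`, then... | ||||
|             guard.increment_len(1); | ||||
|         } | ||||
|     } // `guard` drops here and publishes 3 through the borrowed `len` | ||||
|     assert_eq!(len, 3); | ||||
| } | ||||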
|  | @ -1,119 +0,0 @@ | |||
| // SPDX-License-Identifier: Apache-2.0 OR MIT
 | ||||
| 
 | ||||
| use crate::alloc::Allocator; | ||||
| use crate::collections::TryReserveError; | ||||
| use core::iter::TrustedLen; | ||||
| use core::slice::{self}; | ||||
| 
 | ||||
| use super::{IntoIter, Vec}; | ||||
| 
 | ||||
| // Specialization trait used for Vec::extend
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| pub(super) trait SpecExtend<T, I> { | ||||
|     fn spec_extend(&mut self, iter: I); | ||||
| } | ||||
| 
 | ||||
| // Specialization trait used for Vec::try_extend
 | ||||
| pub(super) trait TrySpecExtend<T, I> { | ||||
|     fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>; | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A> | ||||
| where | ||||
|     I: Iterator<Item = T>, | ||||
| { | ||||
|     default fn spec_extend(&mut self, iter: I) { | ||||
|         self.extend_desugared(iter) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A> | ||||
| where | ||||
|     I: Iterator<Item = T>, | ||||
| { | ||||
|     default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> { | ||||
|         self.try_extend_desugared(iter) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A> | ||||
| where | ||||
|     I: TrustedLen<Item = T>, | ||||
| { | ||||
|     default fn spec_extend(&mut self, iterator: I) { | ||||
|         self.extend_trusted(iterator) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A> | ||||
| where | ||||
|     I: TrustedLen<Item = T>, | ||||
| { | ||||
|     default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> { | ||||
|         self.try_extend_trusted(iterator) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> { | ||||
|     fn spec_extend(&mut self, mut iterator: IntoIter<T>) { | ||||
|         unsafe { | ||||
|             self.append_elements(iterator.as_slice() as _); | ||||
|         } | ||||
|         iterator.forget_remaining_elements(); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> { | ||||
|     fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> { | ||||
|         unsafe { | ||||
|             self.try_append_elements(iterator.as_slice() as _)?; | ||||
|         } | ||||
|         iterator.forget_remaining_elements(); | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A> | ||||
| where | ||||
|     I: Iterator<Item = &'a T>, | ||||
|     T: Clone, | ||||
| { | ||||
|     default fn spec_extend(&mut self, iterator: I) { | ||||
|         self.spec_extend(iterator.cloned()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'a, T: 'a, I, A: Allocator> TrySpecExtend<&'a T, I> for Vec<T, A> | ||||
| where | ||||
|     I: Iterator<Item = &'a T>, | ||||
|     T: Clone, | ||||
| { | ||||
|     default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> { | ||||
|         self.try_spec_extend(iterator.cloned()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A> | ||||
| where | ||||
|     T: Copy, | ||||
| { | ||||
|     fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) { | ||||
|         let slice = iterator.as_slice(); | ||||
|         unsafe { self.append_elements(slice) }; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'a, T: 'a, A: Allocator> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A> | ||||
| where | ||||
|     T: Copy, | ||||
| { | ||||
|     fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> { | ||||
|         let slice = iterator.as_slice(); | ||||
|         unsafe { self.try_append_elements(slice) } | ||||
|     } | ||||
| } | ||||
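| 
 | ||||
| // A hedged sketch of how the specialization above dispatches a `Vec::extend` | ||||
| // call (the comments name the impls above; exact selection is up to rustc): | ||||
| #[cfg(not(no_global_oom_handling))] | ||||
| #[allow(dead_code)] | ||||
| fn dispatch_sketch(v: &mut Vec<i32>, other: Vec<i32>) { | ||||
|     v.extend(other.into_iter()); // IntoIter impl: memcpy, then forget elements | ||||
|     v.extend([1, 2, 3].iter()); // slice::Iter over Copy items: append_elements | ||||
|     v.extend(0..4); // TrustedLen iterator: extend_trusted | ||||
|     v.extend((0..4).filter(|x| x % 2 == 0)); // plain Iterator: extend_desugared | ||||
| } | ||||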