Bug 1912019 - Add a modified dependency to allocator_api2. r=supply-chain-reviewers

See also https://github.com/zakarumych/allocator-api2/issues/10

This patch was produced with the following steps:
 - Vendor allocator-api2 normally.
 - Modify the vendored source in third_party/rust.
   - Stop exporting the Box implementation.
   - Change the version to 0.2.999.
 - Run cargo update -p allocator-api2 --precise 0.2.999

Differential Revision: https://phabricator.services.mozilla.com/D218729
Nicolas Silva
2024-09-18 10:17:24 +00:00
parent af21a5551a
commit a33d4d50d0
29 changed files with 8186 additions and 0 deletions

Cargo.lock generated

@@ -37,6 +37,13 @@ dependencies = [
"memchr",
]
[[package]]
name = "allocator-api2"
version = "0.2.999"
dependencies = [
"serde",
]
[[package]]
name = "alsa"
version = "0.8.1"
@@ -6698,6 +6705,7 @@ dependencies = [
name = "webrender"
version = "0.62.0"
dependencies = [
"allocator-api2",
"bincode",
"bitflags 2.6.0",
"build-parallel",

Cargo.toml

@@ -222,3 +222,5 @@ webext-storage = { git = "https://github.com/mozilla/application-services", rev
# Patch `gpu-descriptor` 0.3.0 to remove unnecessary `allocator-api2` dep.:
# Still waiting for the now-merged <https://github.com/zakarumych/gpu-descriptor/pull/40> to be released.
gpu-descriptor = { git = "https://github.com/zakarumych/gpu-descriptor", rev = "7b71a4e47c81903ad75e2c53deb5ab1310f6ff4d" }
allocator-api2 = { path = "third_party/rust/allocator-api2" }

gfx/wr/Cargo.lock generated

@@ -23,6 +23,15 @@ dependencies = [
"memchr",
]
[[package]]
name = "allocator-api2"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
dependencies = [
"serde",
]
[[package]]
name = "android_log-sys"
version = "0.2.0"
@@ -3067,6 +3076,7 @@ dependencies = [
name = "webrender"
version = "0.62.0"
dependencies = [
"allocator-api2",
"bincode",
"bitflags 2.4.2",
"build-parallel",

gfx/wr/webrender/Cargo.toml

@@ -57,6 +57,7 @@ firefox-on-glean = { version = "0.1.0", optional = true }
swgl = { path = "../swgl", optional = true }
topological-sort = "0.1"
peek-poke = { version = "0.3", path = "../peek-poke" }
allocator-api2 = { version = "0.2.18", features = ["alloc", "serde"] }
[dev-dependencies]
mozangle = "0.3.3"
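For illustration only (the `GlyphCache` type below is hypothetical, not code from this patch): with the `alloc` feature enabled, this dependency lets webrender-style code make its containers generic over an allocator on stable Rust.

```rust
use allocator_api2::{alloc::Allocator, vec::Vec};

// Hypothetical cache whose storage lives in a caller-chosen allocator.
struct GlyphCache<A: Allocator> {
    entries: Vec<u32, A>,
}

impl<A: Allocator> GlyphCache<A> {
    fn new_in(alloc: A) -> Self {
        Self { entries: Vec::new_in(alloc) }
    }

    fn insert(&mut self, glyph_id: u32) {
        self.entries.push(glyph_id);
    }
}
```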

supply-chain/audits.toml

@@ -552,6 +552,11 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "0.7.18 -> 0.7.20"
[[audits.allocator-api2]]
who = "Nicolas Silva <nical@fastmail.com>"
criteria = "safe-to-deploy"
version = "0.2.18"
[[audits.alsa]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"

supply-chain/config.toml

@@ -19,6 +19,10 @@ url = "https://raw.githubusercontent.com/divviup/libprio-rs/main/supply-chain/au
[imports.mozilla]
url = "https://raw.githubusercontent.com/mozilla/supply-chain/main/audits.toml"
[policy.allocator-api2]
audit-as-crates-io = true
notes = "This is the upstream code without the Box implementation which may have a soundness issue."
[policy.any_all_workaround]
audit-as-crates-io = true
notes = "This is the upstream code plus the ARM intrinsics workaround from qcms, see bug 1882209."

supply-chain/imports.lock

@@ -1,6 +1,10 @@
# cargo-vet imports lock
[[unpublished.allocator-api2]]
version = "0.2.999"
audited_as = "0.2.18"
[[publisher.aho-corasick]]
version = "1.1.0"
when = "2023-09-18"

third_party/rust/allocator-api2/.cargo-checksum.json

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"886f8c688db0c22d24b650df0dc30a39d05d54d0e562c00d9574bf31cbf73251","Cargo.toml":"8c0d2ca1b5a6063aedcd462337eeb4dfb755d81e8d132fe810419689e45eef42","LICENSE-APACHE":"20fe7b00e904ed690e3b9fd6073784d3fc428141dbd10b81c01fd143d0797f58","LICENSE-MIT":"36516aefdc84c5d5a1e7485425913a22dbda69eb1930c5e84d6ae4972b5194b9","README.md":"7bf09e77c1d8e9292992b717d88e33d031439aa31dc9e7bb617464270519b051","src/lib.rs":"c937309febe24f97bc637650137311d5b8097b8574b0e973f4d6fb591c3448f7","src/nightly.rs":"fcff4d236e23bc95b1ce2c00140807ba3698cc01233d910d65d74986bb36f161","src/stable/alloc/global.rs":"14836ad7d73a364474fc153b24a1f17ad0e60a69b90a8721dc1059eada8bf869","src/stable/alloc/mod.rs":"866dafd3984dd246e381d8ad1c2b3e02a60c3421b598ca493aa83f9b6422608d","src/stable/alloc/system.rs":"db5d5bf088eecac3fc5ff1281e1bf26ca36dd38f13cd52c49d95ff1bab064254","src/stable/boxed.rs":"8b9b7f4cebbc1629c478dce0dd8227db16508e1383f24490d32eab7aeb3a0cea","src/stable/macros.rs":"74490796a766338d0163f40a37612cd9ea2de58ae3d8e9abf6c7bcf81d9be4a6","src/stable/mod.rs":"a6a724e10e4db4e3b7960c65bac803152a1115af46b898ff8a61e486365c16c7","src/stable/raw_vec.rs":"8cc0e3e4d5fd21e0e83776ff21c576cbb87b69647903ee9b8f5372f8781a7328","src/stable/slice.rs":"089263b058e6c185467bad7ad14908479e5675408fc70a8291e5dddaef36035a","src/stable/vec/drain.rs":"740cd2e0f31eeb0146bbd0f645a14fe12bacd3912f003db433ddc6b3a178461f","src/stable/vec/into_iter.rs":"88c22b09682cd90c7362d702d0501566173b2d836cf82a2b92ae11fdef5b9435","src/stable/vec/mod.rs":"1561b75d0bbcdf64f47bd7f1661088b68796f0e7e02a4e9391d8a50010b86f6b","src/stable/vec/partial_eq.rs":"9f1b18605164a62b58d9e17914d573698735de31c51ceb8bd3666e83d32df370","src/stable/vec/set_len_on_drop.rs":"561342e22a194e515cc25c9a1bcd827ca24c4db033e9e2c4266fbdd2fb16e5bc","src/stable/vec/splice.rs":"95a460b3a7b4af60fdc9ba04d3a719b61a0c11786cd2d8823d022e22c397f9c9"},"package":"5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"}

third_party/rust/allocator-api2/CHANGELOG.md

@@ -0,0 +1,7 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]

third_party/rust/allocator-api2/Cargo.toml

@@ -0,0 +1,38 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
# This is a fork of allocator-api2 0.2.18 with Box removed.
# See https://github.com/zakarumych/allocator-api2/issues/10
[package]
edition = "2018"
name = "allocator-api2"
version = "0.2.999"
authors = ["Zakarum <zaq.dev@icloud.com>"]
description = "Mirror of Rust's allocator API"
homepage = "https://github.com/zakarumych/allocator-api2"
documentation = "https://docs.rs/allocator-api2"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/zakarumych/allocator-api2"
[dependencies.serde]
version = "1.0"
optional = true
[features]
alloc = []
default = ["std"]
nightly = []
std = ["alloc"]
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(no_global_oom_handling)'] }

third_party/rust/allocator-api2/LICENSE-APACHE

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

third_party/rust/allocator-api2/LICENSE-MIT

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/allocator-api2/README.md

@@ -0,0 +1,53 @@
# allocator-api2
[![crates](https://img.shields.io/crates/v/allocator-api2.svg?style=for-the-badge&label=allocator-api2)](https://crates.io/crates/allocator-api2)
[![docs](https://img.shields.io/badge/docs.rs-allocator--api2-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white)](https://docs.rs/allocator-api2)
[![actions](https://img.shields.io/github/actions/workflow/status/zakarumych/allocator-api2/badge.yml?branch=main&style=for-the-badge)](https://github.com/zakarumych/allocator-api2/actions/workflows/badge.yml)
[![MIT/Apache](https://img.shields.io/badge/license-MIT%2FApache-blue.svg?style=for-the-badge)](COPYING)
![loc](https://img.shields.io/tokei/lines/github/zakarumych/allocator-api2?style=for-the-badge)
This crate mirrors the types and traits from Rust's unstable [`allocator_api`]
feature. It is intended to serve as a substitute for the real thing in libraries
built on the stable and beta channels.
The target users are library authors who implement allocators or collection types
that use allocators, and anyone else who wants to use the [`allocator_api`] types.
The crate is expected to be updated frequently with minor version bumps.
Once [`allocator_api`] is stabilized, this crate will get version `1.0` and simply
re-export from `core`, `alloc` and `std`.
The code is mostly a verbatim copy from the Rust repository; the changes are
mostly removed attributes.
## Usage
This section describes how to use this crate correctly to ensure
compatibility and interoperability on both the stable and nightly channels.
If you are writing a library that interacts with the allocator API, you can
add this crate as a dependency and use the types and traits from this
crate instead of the ones in `core` or `alloc`.
This allows your library to compile on the stable and beta channels.
Your library *MAY* provide a feature that enables "allocator-api2/nightly".
When this feature is enabled, your library *MUST* also enable the
unstable `#![feature(allocator_api)]` or it may not compile.
If the feature is not provided, your library may be incompatible with the
rest of the ecosystem and cause compilation errors on the nightly channel
when some other crate enables the "allocator-api2/nightly" feature.
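As a minimal sketch of that usage (assuming the crate's default features, which pull in `alloc`), the `Allocator` trait implemented by `Global` can be used directly:

```rust
use allocator_api2::alloc::{Allocator, Global, Layout};

fn main() {
    // Describe a 64-byte, 8-aligned allocation.
    let layout = Layout::from_size_align(64, 8).unwrap();

    // `allocate` returns a NonNull<[u8]> covering at least `layout.size()` bytes.
    let block = Global.allocate(layout).expect("allocation failed");

    // ... use the memory ...

    // SAFETY: `block` was allocated by `Global` with this exact layout.
    unsafe { Global.deallocate(block.cast(), layout) };
}
```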
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
## Contributions
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
[`allocator_api`]: https://doc.rust-lang.org/unstable-book/library-features/allocator-api.html

third_party/rust/allocator-api2/src/lib.rs

@@ -0,0 +1,20 @@
//!
//! allocator-api2 crate.
//!
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(unused)]
#[cfg(feature = "alloc")]
extern crate alloc as alloc_crate;
#[cfg(not(feature = "nightly"))]
mod stable;
#[cfg(feature = "nightly")]
mod nightly;
#[cfg(not(feature = "nightly"))]
pub use self::stable::*;
#[cfg(feature = "nightly")]
pub use self::nightly::*;

third_party/rust/allocator-api2/src/nightly.rs

@@ -0,0 +1,5 @@
#[cfg(not(feature = "alloc"))]
pub use core::alloc;
#[cfg(feature = "alloc")]
pub use alloc_crate::{alloc, vec};

third_party/rust/allocator-api2/src/stable/alloc/global.rs

@@ -0,0 +1,187 @@
use core::ptr::NonNull;
use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
use crate::stable::{assume, invalid_mut};
use super::{AllocError, Allocator, Layout};
/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](crate#functions).
#[derive(Copy, Clone, Default, Debug)]
pub struct Global;
impl Global {
#[inline(always)]
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(unsafe {
NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(layout.align()),
0,
))
}),
// SAFETY: `layout` is non-zero in size,
size => unsafe {
let raw_ptr = if zeroed {
alloc_zeroed(layout)
} else {
alloc(layout)
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
size,
)))
},
}
}
// SAFETY: Same as `Allocator::grow`
#[inline(always)]
unsafe fn grow_impl(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => self.alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero because it is greater than or equal to `old_size`,
// which is non-zero in this arm. Other conditions must be upheld by the caller
old_size if old_layout.align() == new_layout.align() => unsafe {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
assume(new_size >= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => unsafe {
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
unsafe impl Allocator for Global {
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, false)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, true)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { dealloc(ptr.as_ptr(), layout) }
}
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.deallocate(ptr, old_layout);
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(new_layout.align()),
0,
)))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
assume(new_size <= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => unsafe {
let new_ptr = self.allocate(new_layout)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}

third_party/rust/allocator-api2/src/stable/alloc/mod.rs

@@ -0,0 +1,416 @@
//! Memory allocation APIs
use core::{
fmt,
ptr::{self, NonNull},
};
#[cfg(feature = "alloc")]
mod global;
#[cfg(feature = "std")]
mod system;
pub use core::alloc::{GlobalAlloc, Layout, LayoutError};
#[cfg(feature = "alloc")]
pub use self::global::Global;
#[cfg(feature = "std")]
pub use self::system::System;
#[cfg(feature = "alloc")]
pub use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
#[cfg(all(feature = "alloc", not(no_global_oom_handling)))]
pub use alloc_crate::alloc::handle_alloc_error;
/// The `AllocError` error indicates an allocation failure
/// that may be due to resource exhaustion or to
/// something wrong when combining the given input arguments with this
/// allocator.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
#[cfg(feature = "std")]
impl std::error::Error for AllocError {}
// (we need this for downstream impl of trait Error)
impl fmt::Display for AllocError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("memory allocation failed")
}
}
/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
/// data described via [`Layout`][].
///
/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because an
/// allocator like `MyAlloc([u8; N])` cannot be moved without updating the pointers to the
/// allocated memory.
///
/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying
/// allocator does not support this (like jemalloc) or return a null pointer (such as
/// `libc::malloc`), this must be caught by the implementation.
///
/// ### Currently allocated memory
///
/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
/// means that:
///
/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
/// [`shrink`], and
///
/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
/// remains valid.
///
/// [`allocate`]: Allocator::allocate
/// [`grow`]: Allocator::grow
/// [`shrink`]: Allocator::shrink
/// [`deallocate`]: Allocator::deallocate
///
/// ### Memory fitting
///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
///
/// * The block must be allocated with the same alignment as [`layout.align()`], and
///
/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
/// - `min` is the size of the layout most recently used to allocate the block, and
/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
///
/// [`layout.align()`]: Layout::align
/// [`layout.size()`]: Layout::size
///
/// # Safety
///
/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
/// until the instance and all of its clones are dropped,
///
/// * cloning or moving the allocator must not invalidate memory blocks returned from this
/// allocator. A cloned allocator must behave like the same allocator, and
///
/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
/// method of the allocator.
///
/// [*currently allocated*]: #currently-allocated-memory
pub unsafe trait Allocator {
/// Attempts to allocate a block of memory.
///
/// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
///
/// The returned block may have a larger size than specified by `layout.size()`, and may or may
/// not have its contents initialized.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
/// allocator's size or alignment constraints.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
/// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
/// allocator's size or alignment constraints.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let ptr = self.allocate(layout)?;
// SAFETY: `alloc` returns a valid memory block
unsafe { ptr.cast::<u8>().as_ptr().write_bytes(0, ptr.len()) }
Ok(ptr)
}
/// Deallocates the memory referenced by `ptr`.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
/// * `layout` must [*fit*] that block of memory.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
/// Attempts to extend the memory block.
///
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
/// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
/// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
/// allocation was grown in-place. The newly returned pointer is the only valid pointer
/// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if growing otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate(new_layout)?;
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Behaves like `grow`, but also ensures that the new contents are set to zero before being
/// returned.
///
/// The memory block will contain the following contents after a successful call to
/// `grow_zeroed`:
/// * Bytes `0..old_layout.size()` are preserved from the original allocation.
/// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
/// the allocator implementation. `old_size` refers to the size of the memory block prior
/// to the `grow_zeroed` call, which may be larger than the size that was originally
/// requested when it was allocated.
/// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
/// block returned by the `grow_zeroed` call.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if growing otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate_zeroed(new_layout)?;
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Attempts to shrink the memory block.
///
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
/// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
/// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
/// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
/// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if shrinking otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate(new_layout)?;
// SAFETY: because `new_layout.size()` must be lower than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Creates a "by reference" adapter for this instance of `Allocator`.
///
/// The returned adapter also implements `Allocator` and will simply borrow this.
#[inline(always)]
fn by_ref(&self) -> &Self
where
Self: Sized,
{
self
}
}
unsafe impl<A> Allocator for &A
where
A: Allocator + ?Sized,
{
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
(**self).allocate(layout)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
(**self).allocate_zeroed(layout)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).deallocate(ptr, layout) }
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).grow(ptr, old_layout, new_layout) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).shrink(ptr, old_layout, new_layout) }
}
}
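To make the contract above concrete, here is a minimal sketch (not part of the vendored crate; the `CountingAlloc` type is invented for illustration) of a user-defined allocator that counts live allocations and delegates the real work to `Global`. Only `allocate` and `deallocate` need to be written; `allocate_zeroed`, `grow`, `grow_zeroed`, and `shrink` fall back to the default implementations shown above.

```rust
use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};

use allocator_api2::alloc::{AllocError, Allocator, Global, Layout};

/// Hypothetical allocator that counts live allocations while delegating to `Global`.
#[derive(Default)]
struct CountingAlloc {
    live: AtomicUsize,
}

unsafe impl Allocator for CountingAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let block = Global.allocate(layout)?;
        self.live.fetch_add(1, Ordering::Relaxed);
        Ok(block)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        self.live.fetch_sub(1, Ordering::Relaxed);
        // SAFETY: the caller guarantees `ptr` was allocated by this allocator with
        // `layout`, and `allocate` forwarded to `Global`, so `Global` may free it.
        unsafe { Global.deallocate(ptr, layout) }
    }
}
```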

third_party/rust/allocator-api2/src/stable/alloc/system.rs

@@ -0,0 +1,172 @@
use core::ptr::NonNull;
pub use std::alloc::System;
use crate::stable::{assume, invalid_mut};
use super::{AllocError, Allocator, GlobalAlloc as _, Layout};
unsafe impl Allocator for System {
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
alloc_impl(layout, false)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
alloc_impl(layout, true)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { System.dealloc(ptr.as_ptr(), layout) }
}
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { grow_impl(ptr, old_layout, new_layout, false) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { grow_impl(ptr, old_layout, new_layout, true) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.deallocate(ptr, old_layout);
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(new_layout.align()),
0,
)))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
assume(new_size <= old_layout.size());
let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => unsafe {
let new_ptr = self.allocate(new_layout)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
#[inline(always)]
fn alloc_impl(layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(unsafe {
NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(layout.align()),
0,
))
}),
// SAFETY: `layout` is non-zero in size,
size => unsafe {
let raw_ptr = if zeroed {
System.alloc_zeroed(layout)
} else {
System.alloc(layout)
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
size,
)))
},
}
}
// SAFETY: Same as `Allocator::grow`
#[inline(always)]
unsafe fn grow_impl(
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero because it is greater than or equal to `old_size`,
// which is non-zero in this arm. Other conditions must be upheld by the caller
old_size if old_layout.align() == new_layout.align() => unsafe {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
assume(new_size >= old_layout.size());
let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => unsafe {
let new_ptr = alloc_impl(new_layout, zeroed)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
System.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}

File diff suppressed because it is too large.

third_party/rust/allocator-api2/src/stable/macros.rs

@@ -0,0 +1,87 @@
/// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// use allocator_api2::vec;
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
///
/// ```
/// use allocator_api2::{vec, alloc::Global};
/// let v = vec![in Global; 1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// use allocator_api2::vec;
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// ```
/// use allocator_api2::{vec, alloc::Global};
/// let v = vec![in Global; 1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
/// be mindful of side effects.
///
/// [`Vec`]: crate::vec::Vec
#[cfg(not(no_global_oom_handling))]
#[macro_export]
macro_rules! vec {
(in $alloc:expr $(;)?) => (
$crate::vec::Vec::new_in($alloc)
);
(in $alloc:expr; $elem:expr; $n:expr) => (
$crate::vec::from_elem_in($elem, $n, $alloc)
);
/*
(in $alloc:expr; $($x:expr),+ $(,)?) => (
$crate::boxed::Box::<[_]>::into_vec(
$crate::boxed::Box::slice(
$crate::boxed::Box::new_in([$($x),+], $alloc)
)
)
);
*/
() => (
$crate::vec::Vec::new()
);
($elem:expr; $n:expr) => (
$crate::vec::from_elem($elem, $n)
);
/*
($($x:expr),+ $(,)?) => (
$crate::boxed::Box::<[_]>::into_vec(
$crate::boxed::Box::slice(
$crate::boxed::Box::new([$($x),+])
)
)
);
*/
}

third_party/rust/allocator-api2/src/stable/mod.rs

@@ -0,0 +1,62 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(clippy::needless_doctest_main, clippy::partialeq_ne_impl)]
#[cfg(feature = "alloc")]
pub use self::slice::SliceExt;
pub mod alloc;
#[cfg(feature = "alloc")]
pub(crate) mod boxed;
#[cfg(feature = "alloc")]
mod raw_vec;
#[cfg(feature = "alloc")]
pub mod vec;
#[cfg(feature = "alloc")]
mod macros;
#[cfg(feature = "alloc")]
mod slice;
#[cfg(feature = "alloc")]
#[track_caller]
#[inline(always)]
#[cfg(debug_assertions)]
unsafe fn assume(v: bool) {
if !v {
core::unreachable!()
}
}
#[cfg(feature = "alloc")]
#[track_caller]
#[inline(always)]
#[cfg(not(debug_assertions))]
unsafe fn assume(v: bool) {
if !v {
unsafe {
core::hint::unreachable_unchecked();
}
}
}
#[cfg(feature = "alloc")]
#[inline(always)]
fn addr<T>(x: *const T) -> usize {
#[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
unsafe {
core::mem::transmute(x)
}
}
#[cfg(feature = "alloc")]
#[inline(always)]
fn invalid_mut<T>(addr: usize) -> *mut T {
#[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
unsafe {
core::mem::transmute(addr)
}
}

third_party/rust/allocator-api2/src/stable/raw_vec.rs

@@ -0,0 +1,642 @@
use core::alloc::LayoutError;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull};
use core::slice;
use core::{cmp, fmt};
use super::{
alloc::{Allocator, Global, Layout},
assume,
boxed::Box,
};
#[cfg(not(no_global_oom_handling))]
use super::alloc::handle_alloc_error;
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TryReserveError {
kind: TryReserveErrorKind,
}
impl TryReserveError {
/// Details about the allocation that caused the error
pub fn kind(&self) -> TryReserveErrorKind {
self.kind.clone()
}
}
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveErrorKind {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// The memory allocator returned an error
AllocError {
/// The layout of allocation request that failed
layout: Layout,
#[doc(hidden)]
non_exhaustive: (),
},
}
use TryReserveErrorKind::*;
impl From<TryReserveErrorKind> for TryReserveError {
#[inline(always)]
fn from(kind: TryReserveErrorKind) -> Self {
Self { kind }
}
}
impl From<LayoutError> for TryReserveErrorKind {
/// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
#[inline(always)]
fn from(_: LayoutError) -> Self {
TryReserveErrorKind::CapacityOverflow
}
}
impl fmt::Display for TryReserveError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
fmt.write_str("memory allocation failed")?;
let reason = match self.kind {
TryReserveErrorKind::CapacityOverflow => {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
" because the memory allocator returned an error"
}
};
fmt.write_str(reason)
}
}
#[cfg(feature = "std")]
impl std::error::Error for TryReserveError {}
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
/// The contents of the new memory are uninitialized.
Uninitialized,
/// The new memory is guaranteed to be zeroed.
Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types.
/// * Produces `NonNull::dangling()` on zero-length allocations.
/// * Avoids freeing `NonNull::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::NonNull` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: NonNull<T>,
cap: usize,
alloc: A,
}
// Safety: RawVec owns both T and A, so sending is safe if
// sending is safe for T and A.
unsafe impl<T, A: Allocator> Send for RawVec<T, A>
where
T: Send,
A: Send,
{
}
// Safety: RawVec owns both T and A, so sharing is safe if
// sharing is safe for T and A.
unsafe impl<T, A: Allocator> Sync for RawVec<T, A>
where
T: Sync,
A: Sync,
{
}
impl<T> RawVec<T, Global> {
/// Creates the biggest possible `RawVec` (on the system heap)
/// without allocating. If `T` has positive size, then this makes a
/// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
/// `RawVec` with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
#[must_use]
pub const fn new() -> Self {
Self::new_in(Global)
}
/// Creates a `RawVec` (on the system heap) with exactly the
/// capacity and alignment requirements for a `[T; capacity]`. This is
/// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
/// zero-sized. Note that if `T` is zero-sized this means you will
/// *not* get a `RawVec` with the requested capacity.
///
/// # Panics
///
/// Panics if the requested capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[inline(always)]
pub fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_in(capacity, Global)
}
/// Like `with_capacity`, but guarantees the buffer is zeroed.
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[inline(always)]
pub fn with_capacity_zeroed(capacity: usize) -> Self {
Self::with_capacity_zeroed_in(capacity, Global)
}
}
impl<T, A: Allocator> RawVec<T, A> {
// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
8
} else if mem::size_of::<T>() <= 1024 {
4
} else {
1
};
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
#[inline(always)]
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
Self {
ptr: NonNull::dangling(),
cap: 0,
alloc,
}
}
/// Like `with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
}
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (See description of type for details.)
///
/// # Safety
///
/// * `len` must be greater than or equal to the most recently requested capacity, and
/// * `len` must be less than or equal to `self.capacity()`.
///
/// Note that the requested capacity and `self.capacity()` could differ, as
/// an allocator could overallocate and return a larger memory block than requested.
#[inline(always)]
pub(crate) unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
// Sanity-check one half of the safety requirement (we cannot check the other half).
debug_assert!(
len <= self.capacity(),
"`len` must be smaller than or equal to `self.capacity()`"
);
let me = ManuallyDrop::new(self);
unsafe {
let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
Box::from_raw_in(slice, ptr::read(&me.alloc))
}
}
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if mem::size_of::<T>() == 0 || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match Layout::array::<T>(capacity) {
Ok(layout) => layout,
Err(_) => capacity_overflow(),
};
match alloc_guard(layout.size()) {
Ok(_) => {}
Err(_) => capacity_overflow(),
}
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => handle_alloc_error(layout),
};
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) },
cap: capacity,
alloc,
}
}
}
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Safety
///
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
/// systems). ZST vectors may have a capacity up to `usize::MAX`.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline(always)]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
Self {
ptr: unsafe { NonNull::new_unchecked(ptr) },
cap: capacity,
alloc,
}
}
/// Gets a raw pointer to the start of the allocation. Note that this is
/// `NonNull::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
/// be careful.
#[inline(always)]
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
if mem::size_of::<T>() == 0 {
usize::MAX
} else {
self.cap
}
}
/// Returns a shared reference to the allocator backing this `RawVec`.
#[inline(always)]
pub fn allocator(&self) -> &A {
&self.alloc
}
#[inline(always)]
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if mem::size_of::<T>() == 0 || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
Some((self.ptr.cast(), layout))
}
}
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already have enough capacity, will
/// reallocate enough space plus comfortable slack space to get amortized
/// *O*(1) behavior. Will limit this behavior if it would needlessly cause
/// itself to panic.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve(&mut self, len: usize, additional: usize) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
// handle_reserve behind a call, while making sure that this function is likely to be
// inlined as just a comparison and a call if the comparison fails.
#[cold]
#[inline(always)]
fn do_reserve_and_handle<T, A: Allocator>(
slf: &mut RawVec<T, A>,
len: usize,
additional: usize,
) {
handle_reserve(slf.grow_amortized(len, additional));
}
if self.needs_to_grow(len, additional) {
do_reserve_and_handle(self, len, additional);
}
}
/// A specialized version of `reserve()` used only by the hot and
/// oft-instantiated `Vec::push()`, which does its own capacity check.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve_for_push(&mut self, len: usize) {
handle_reserve(self.grow_amortized(len, 1));
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
#[inline(always)]
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_amortized(len, additional)
} else {
Ok(())
}
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already, will reallocate the
/// minimum possible amount of memory necessary. Generally this will be
/// exactly the amount of memory necessary, but in principle the allocator
/// is free to give back more than we asked for.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe code
/// *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve_exact(&mut self, len: usize, additional: usize) {
handle_reserve(self.try_reserve_exact(len, additional));
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
#[inline(always)]
pub fn try_reserve_exact(
&mut self,
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_exact(len, additional)
} else {
Ok(())
}
}
/// Shrinks the buffer down to the specified capacity. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn shrink_to_fit(&mut self, cap: usize) {
handle_reserve(self.shrink(cap));
}
}
impl<T, A: Allocator> RawVec<T, A> {
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
#[inline(always)]
fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
additional > self.capacity().wrapping_sub(len)
}
#[inline(always)]
fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) };
self.cap = cap;
}
// This method is usually instantiated many times. So we want it to be as
// small as possible, to improve compile times. But we also want as much of
// its contents to be statically computable as possible, to make the
// generated code run faster. Therefore, this method is carefully written
// so that all of the code that depends on `T` is within it, while as much
// of the code that doesn't depend on `T` as possible is in functions that
// are non-generic over `T`.
#[inline(always)]
fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
// Nothing we can really do about these checks, sadly.
let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
let cap = cmp::max(self.cap * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
// The constraints on this method are much the same as those on
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
#[inline(always)]
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(
cap <= self.capacity(),
"Tried to shrink to a larger capacity"
);
let (ptr, layout) = if let Some(mem) = self.current_memory() {
mem
} else {
return Ok(());
};
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError {
layout: new_layout,
non_exhaustive: (),
})?
};
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(always)]
fn finish_grow<A>(
new_layout: Result<Layout, LayoutError>,
current_memory: Option<(NonNull<u8>, Layout)>,
alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
A: Allocator,
{
// Check for the error here to minimize the size of `RawVec::grow_*`.
let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let memory = if let Some((ptr, old_layout)) = current_memory {
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe {
// The allocator checks for alignment equality
assume(old_layout.align() == new_layout.align());
alloc.grow(ptr, old_layout, new_layout)
}
} else {
alloc.allocate(new_layout)
};
memory.map_err(|_| {
AllocError {
layout: new_layout,
non_exhaustive: (),
}
.into()
})
}
impl<T, A: Allocator> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
#[inline(always)]
fn drop(&mut self) {
if let Some((ptr, layout)) = self.current_memory() {
unsafe { self.alloc.deallocate(ptr, layout) }
}
}
}
// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn handle_reserve(result: Result<(), TryReserveError>) {
match result.map_err(|e| e.kind()) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline(always)]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow.into())
} else {
Ok(())
}
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
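// --- Illustrative sketch (not part of the vendored crate) ---
// A minimal, safe restatement of the growth policy implemented by
// `RawVec::grow_amortized` above: double the current capacity, but never
// return less than the requested `len + additional` or the size-dependent
// minimum. The helper name `amortized_cap` and the standalone `main` are
// hypothetical, for illustration only.
fn amortized_cap(elem_size: usize, cap: usize, len: usize, additional: usize) -> Option<usize> {
    // Mirrors `MIN_NON_ZERO_CAP`: 8 for 1-byte elements, 4 up to 1 KiB, else 1.
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    let required = len.checked_add(additional)?; // `CapacityOverflow` in the real code
    Some(required.max(cap * 2).max(min_non_zero_cap))
}

fn main() {
    // Pushing one more element into a full 4-element buffer of `u32` doubles it to 8.
    assert_eq!(amortized_cap(4, 4, 4, 1), Some(8));
    // A fresh buffer of small elements jumps straight to the minimum capacity of 4.
    assert_eq!(amortized_cap(4, 0, 0, 1), Some(4));
}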


@@ -0,0 +1,171 @@
use crate::{
alloc::{Allocator, Global},
vec::Vec,
};
/// Slice methods that use `Box` and `Vec` from this crate.
pub trait SliceExt<T> {
/// Copies `self` into a new `Vec`.
///
/// # Examples
///
/// ```
/// let s = [10, 40, 30];
/// let x = s.to_vec();
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn to_vec(&self) -> Vec<T, Global>
where
T: Clone,
{
self.to_vec_in(Global)
}
/// Copies `self` into a new `Vec` with an allocator.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
///
/// let s = [10, 40, 30];
/// let x = s.to_vec_in(System);
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
T: Clone;
/// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// b"0123456789abcdef".repeat(usize::MAX);
/// ```
fn repeat(&self, n: usize) -> Vec<T, Global>
where
T: Copy;
}
impl<T> SliceExt<T> for [T] {
#[cfg(not(no_global_oom_handling))]
#[inline]
fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
T: Clone,
{
struct DropGuard<'a, T, A: Allocator> {
vec: &'a mut Vec<T, A>,
num_init: usize,
}
impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
#[inline]
fn drop(&mut self) {
// SAFETY:
// items were marked initialized in the loop below
unsafe {
self.vec.set_len(self.num_init);
}
}
}
let mut vec = Vec::with_capacity_in(self.len(), alloc);
let mut guard = DropGuard {
vec: &mut vec,
num_init: 0,
};
let slots = guard.vec.spare_capacity_mut();
// .take(slots.len()) is necessary for LLVM to remove bounds checks
// and has better codegen than zip.
for (i, b) in self.iter().enumerate().take(slots.len()) {
guard.num_init = i;
slots[i].write(b.clone());
}
core::mem::forget(guard);
// SAFETY:
// the vec was allocated and initialized above to at least this length.
unsafe {
vec.set_len(self.len());
}
vec
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn repeat(&self, n: usize) -> Vec<T, Global>
where
T: Copy,
{
if n == 0 {
return Vec::new();
}
// If `n` is larger than zero, it can be split as
// `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
// `2^expn` is the number represented by the leftmost '1' bit of `n`,
// and `rem` is the remaining part of `n`.
// Using `Vec` to access `set_len()`.
let capacity = self.len().checked_mul(n).expect("capacity overflow");
let mut buf = Vec::with_capacity(capacity);
// `2^expn` repetition is done by doubling `buf` `expn`-times.
buf.extend(self);
{
let mut m = n >> 1;
// If `m > 0`, there are remaining bits up to the leftmost '1'.
while m > 0 {
// `buf.extend(buf)`:
unsafe {
core::ptr::copy_nonoverlapping(
buf.as_ptr(),
(buf.as_mut_ptr() as *mut T).add(buf.len()),
buf.len(),
);
// `buf` has capacity of `self.len() * n`.
let buf_len = buf.len();
buf.set_len(buf_len * 2);
}
m >>= 1;
}
}
// `rem` (`= n - 2^expn`) repetition is done by copying
// first `rem` repetitions from `buf` itself.
let rem_len = capacity - buf.len(); // `self.len() * rem`
if rem_len > 0 {
// `buf.extend(buf[0 .. rem_len])`:
unsafe {
// This is non-overlapping since `2^expn > rem`.
core::ptr::copy_nonoverlapping(
buf.as_ptr(),
(buf.as_mut_ptr() as *mut T).add(buf.len()),
rem_len,
);
// `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
buf.set_len(capacity);
}
}
buf
}
}
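// --- Illustrative sketch (not part of the vendored crate) ---
// The `repeat` implementation above uses the decomposition `n = 2^expn + rem`:
// double the buffer `expn` times, then copy the first `rem` repetitions. The
// same strategy can be written with std `Vec` and safe code only; the helper
// name `repeat_by_doubling` is hypothetical.
fn repeat_by_doubling<T: Copy>(s: &[T], n: usize) -> Vec<T> {
    if n == 0 {
        return Vec::new();
    }
    let capacity = s.len().checked_mul(n).expect("capacity overflow");
    let mut buf = Vec::with_capacity(capacity);
    buf.extend_from_slice(s);
    // Doubling step: produces the `2^expn` repetitions.
    let mut m = n >> 1;
    while m > 0 {
        buf.extend_from_within(..);
        m >>= 1;
    }
    // Remainder step: copy the first `rem` repetitions to reach `s.len() * n`.
    let rem_len = capacity - buf.len();
    buf.extend_from_within(..rem_len);
    buf
}

fn main() {
    assert_eq!(repeat_by_doubling(&[1, 2], 3), vec![1, 2, 1, 2, 1, 2]);
}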


@@ -0,0 +1,242 @@
use core::fmt;
use core::iter::FusedIterator;
use core::mem::{self, size_of, ManuallyDrop};
use core::ptr::{self, NonNull};
use core::slice::{self};
use crate::stable::alloc::{Allocator, Global};
use super::Vec;
/// A draining iterator for `Vec<T>`.
///
/// This `struct` is created by [`Vec::drain`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<_> = v.drain(..);
/// ```
pub struct Drain<'a, T: 'a, A: Allocator + 'a = Global> {
/// Index of tail to preserve
pub(super) tail_start: usize,
/// Length of tail
pub(super) tail_len: usize,
/// Current remaining range to remove
pub(super) iter: slice::Iter<'a, T>,
pub(super) vec: NonNull<Vec<T, A>>,
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
}
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
/// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
/// let _ = drain.next().unwrap();
/// assert_eq!(drain.as_slice(), &['b', 'c']);
/// ```
#[must_use]
#[inline(always)]
pub fn as_slice(&self) -> &[T] {
self.iter.as_slice()
}
/// Returns a reference to the underlying allocator.
#[must_use]
#[inline(always)]
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
/// Keep unyielded elements in the source `Vec`.
///
/// # Examples
///
/// ```
/// #![feature(drain_keep_rest)]
///
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
///
/// assert_eq!(drain.next().unwrap(), 'a');
///
/// // This call keeps 'b' and 'c' in the vec.
/// drain.keep_rest();
///
/// // If `keep_rest()` had not been called,
/// // `vec` would be empty.
/// assert_eq!(vec, ['b', 'c']);
/// ```
#[inline(always)]
pub fn keep_rest(self) {
// At this moment layout looks like this:
//
// [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
// ^-- start \_________/-- unyielded_len \____/-- self.tail_len
// ^-- unyielded_ptr ^-- tail
//
// Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
// Here we want to
// 1. Move [unyielded] to `start`
// 2. Move [tail] to a new start at `start + len(unyielded)`
// 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
// a. In case of ZST, this is the only thing we want to do
// 4. Do *not* drop self, as everything is already in a consistent state and there is nothing to do
let mut this = ManuallyDrop::new(self);
unsafe {
let source_vec = this.vec.as_mut();
let start = source_vec.len();
let tail = this.tail_start;
let unyielded_len = this.iter.len();
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
let needs_move = mem::size_of::<T>() != 0;
if needs_move {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
if unyielded_ptr != start_ptr {
let src = unyielded_ptr;
let dst = start_ptr;
ptr::copy(src, dst, unyielded_len);
}
// memmove back untouched tail
if tail != (start + unyielded_len) {
let src = source_vec.as_ptr().add(tail);
let dst = start_ptr.add(unyielded_len);
ptr::copy(src, dst, this.tail_len);
}
}
source_vec.set_len(start + unyielded_len + this.tail_len);
}
}
}
impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
#[inline(always)]
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
type Item = T;
#[inline(always)]
fn next(&mut self) -> Option<T> {
self.iter
.next()
.map(|elt| unsafe { ptr::read(elt as *const _) })
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<T> {
self.iter
.next_back()
.map(|elt| unsafe { ptr::read(elt as *const _) })
}
}
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
#[inline]
fn drop(&mut self) {
/// Moves back the un-`Drain`ed elements to restore the original `Vec`.
struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
if self.0.tail_len > 0 {
unsafe {
let source_vec = self.0.vec.as_mut();
// memmove back untouched tail, update to new length
let start = source_vec.len();
let tail = self.0.tail_start;
if tail != start {
let src = source_vec.as_ptr().add(tail);
let dst = source_vec.as_mut_ptr().add(start);
ptr::copy(src, dst, self.0.tail_len);
}
source_vec.set_len(start + self.0.tail_len);
}
}
}
}
let iter = mem::replace(&mut self.iter, [].iter());
let drop_len = iter.len();
let mut vec = self.vec;
if size_of::<T>() == 0 {
// ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount.
// This can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
let vec = vec.as_mut();
let old_len = vec.len();
vec.set_len(old_len + drop_len + self.tail_len);
vec.truncate(old_len + self.tail_len);
}
return;
}
// ensure elements are moved back into their appropriate places, even when drop_in_place panics
let _guard = DropGuard(self);
if drop_len == 0 {
return;
}
// as_slice() must only be called when iter.len() is > 0 because
// vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
// the iterator's internal pointers. Creating a reference to deallocated memory
// is invalid even when it is zero-length
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
// drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
// a pointer with mutable provenance is necessary. Therefore we must reconstruct
// it from the original vec but also avoid creating a &mut to the front since that could
// invalidate raw pointers to it which some unsafe code might rely on.
let vec_ptr = vec.as_mut().as_mut_ptr();
let drop_offset = drop_ptr.offset_from(vec_ptr) as usize;
let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
ptr::drop_in_place(to_drop);
}
}
}
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
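// --- Illustrative example (not part of the vendored crate) ---
// std's `Vec::drain` has the same observable contract as the `Drain` above:
// the drained range is yielded by value, and when the iterator is dropped the
// preserved tail is moved back down next to the head.
fn main() {
    let mut v = vec![1, 2, 3, 4, 5];
    let removed: Vec<i32> = v.drain(1..3).collect();
    assert_eq!(removed, [2, 3]);
    // The tail [4, 5] was shifted back to follow the head [1].
    assert_eq!(v, [1, 4, 5]);
}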


@@ -0,0 +1,198 @@
use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, size_of, ManuallyDrop};
use core::ptr::{self, NonNull};
use core::slice::{self};
use crate::stable::addr;
use super::{Allocator, Global, RawVec};
#[cfg(not(no_global_oom_handling))]
use super::Vec;
/// An iterator that moves out of a vector.
///
/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
/// (provided by the [`IntoIterator`] trait).
///
/// # Example
///
/// ```
/// let v = vec![0, 1, 2];
/// let iter: std::vec::IntoIter<_> = v.into_iter();
/// ```
pub struct IntoIter<T, A: Allocator = Global> {
pub(super) buf: NonNull<T>,
pub(super) phantom: PhantomData<T>,
pub(super) cap: usize,
// the drop impl reconstructs a RawVec from buf, cap and alloc
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
pub(super) end: *const T,
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
}
}
impl<T, A: Allocator> IntoIter<T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// let _ = into_iter.next().unwrap();
/// assert_eq!(into_iter.as_slice(), &['b', 'c']);
/// ```
pub fn as_slice(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.ptr, self.len()) }
}
/// Returns the remaining items of this iterator as a mutable slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// into_iter.as_mut_slice()[2] = 'z';
/// assert_eq!(into_iter.next().unwrap(), 'a');
/// assert_eq!(into_iter.next().unwrap(), 'b');
/// assert_eq!(into_iter.next().unwrap(), 'z');
/// ```
pub fn as_mut_slice(&mut self) -> &mut [T] {
unsafe { &mut *self.as_raw_mut_slice() }
}
/// Returns a reference to the underlying allocator.
#[inline(always)]
pub fn allocator(&self) -> &A {
&self.alloc
}
fn as_raw_mut_slice(&mut self) -> *mut [T] {
ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
}
}
impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
type Item = T;
#[inline(always)]
fn next(&mut self) -> Option<T> {
if self.ptr == self.end {
None
} else if size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
self.ptr = self.ptr.cast::<u8>().wrapping_add(1).cast();
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = if size_of::<T>() == 0 {
addr(self.end).wrapping_sub(addr(self.ptr))
} else {
unsafe { self.end.offset_from(self.ptr) as usize }
};
(exact, Some(exact))
}
#[inline(always)]
fn count(self) -> usize {
self.len()
}
}
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
} else if size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
self.end = self.end.cast::<u8>().wrapping_add(1).cast();
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
}
}
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {}
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[doc(hidden)]
pub trait NonDrop {}
// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
// and thus we can't implement drop-handling
impl<T: Copy> NonDrop for T {}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
fn clone(&self) -> Self {
let mut vec = Vec::<T, A>::with_capacity_in(self.len(), (*self.alloc).clone());
vec.extend(self.as_slice().iter().cloned());
vec.into_iter()
}
}
impl<T, A: Allocator> Drop for IntoIter<T, A> {
fn drop(&mut self) {
struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
fn drop(&mut self) {
unsafe {
// `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
let alloc = ManuallyDrop::take(&mut self.0.alloc);
// RawVec handles deallocation
let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
}
}
}
let guard = DropGuard(self);
// destroy the remaining elements
unsafe {
ptr::drop_in_place(guard.0.as_raw_mut_slice());
}
// now `guard` will be dropped and do the rest
}
}
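// --- Illustrative example (not part of the vendored crate) ---
// Observable consequence of the zero-sized-type handling above, shown with
// std's `vec::IntoIter`: even though ZST elements cannot be addressed by
// pointer offsets, the remaining length is still tracked exactly.
fn main() {
    let mut it = vec![(), (), ()].into_iter();
    assert_eq!(it.size_hint(), (3, Some(3)));
    it.next();
    assert_eq!(it.size_hint(), (2, Some(2)));
    assert_eq!(it.count(), 2);
}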

File diff suppressed because it is too large.


@@ -0,0 +1,43 @@
#[cfg(not(no_global_oom_handling))]
use alloc_crate::borrow::Cow;
use crate::stable::alloc::Allocator;
use super::Vec;
macro_rules! __impl_slice_eq1 {
([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?) => {
impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
where
T: PartialEq<U>,
$($ty: $bound)?
{
#[inline(always)]
fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
#[inline(always)]
fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
}
}
}
__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2> }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U] }
__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A> }
__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A> }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U] }
__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A> }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N] }
// NOTE: some less important impls are omitted to reduce code bloat
// FIXME(Centril): Reconsider this?
//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
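// --- Illustrative example (not part of the vendored crate) ---
// The macro above generates the usual family of slice/array comparisons;
// std's `Vec` provides the same set, so the effect can be shown directly.
fn main() {
    let v = vec![1, 2, 3];
    assert!(v == [1, 2, 3]);          // Vec<T, A> == [U; N]
    assert!(v == &[1, 2, 3][..]);     // Vec<T, A> == &[U]
    assert!(&[1, 2, 3][..] == v);     // &[T] == Vec<U, A>
}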


@@ -0,0 +1,31 @@
// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
//
// The idea is: The length field in SetLenOnDrop is a local variable
// that the optimizer will see does not alias with any stores through the Vec's data
// pointer. This is a workaround for alias analysis issue #32155
pub(super) struct SetLenOnDrop<'a> {
len: &'a mut usize,
local_len: usize,
}
impl<'a> SetLenOnDrop<'a> {
#[inline(always)]
pub(super) fn new(len: &'a mut usize) -> Self {
SetLenOnDrop {
local_len: *len,
len,
}
}
#[inline(always)]
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
}
impl Drop for SetLenOnDrop<'_> {
#[inline(always)]
fn drop(&mut self) {
*self.len = self.local_len;
}
}
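// --- Illustrative sketch (not part of the vendored crate) ---
// The guard above lets callers update a length through a local variable and
// commit it when the guard is dropped. A minimal standalone sketch of the same
// pattern with a plain `usize`; the name `CommitLenOnDrop` is hypothetical.
struct CommitLenOnDrop<'a> {
    len: &'a mut usize,
    local_len: usize,
}

impl Drop for CommitLenOnDrop<'_> {
    fn drop(&mut self) {
        // Write the working copy back exactly once, when the guard goes away.
        *self.len = self.local_len;
    }
}

fn main() {
    let mut len = 0usize;
    {
        let mut guard = CommitLenOnDrop { local_len: len, len: &mut len };
        for _ in 0..5 {
            guard.local_len += 1;
        }
    } // the guard drops here and commits the final value
    assert_eq!(len, 5);
}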


@@ -0,0 +1,135 @@
use core::ptr::{self};
use core::slice::{self};
use crate::stable::alloc::{Allocator, Global};
use super::{Drain, Vec};
/// A splicing iterator for `Vec`.
///
/// This struct is created by [`Vec::splice()`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let new = [7, 8];
/// let iter: std::vec::Splice<_> = v.splice(1.., new);
/// ```
#[derive(Debug)]
pub struct Splice<'a, I: Iterator + 'a, A: Allocator + 'a = Global> {
pub(super) drain: Drain<'a, I::Item, A>,
pub(super) replace_with: I,
}
impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
type Item = I::Item;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
self.drain.next()
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.drain.size_hint()
}
}
impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<Self::Item> {
self.drain.next_back()
}
}
impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
#[inline]
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
unsafe {
if self.drain.tail_len == 0 {
self.drain.vec.as_mut().extend(self.replace_with.by_ref());
return;
}
// First fill the range left by drain().
if !self.drain.fill(&mut self.replace_with) {
return;
}
// There may be more elements. Use the lower bound as an estimate.
// FIXME: Is the upper bound a better guess? Or something else?
let (lower_bound, _upper_bound) = self.replace_with.size_hint();
if lower_bound > 0 {
self.drain.move_tail(lower_bound);
if !self.drain.fill(&mut self.replace_with) {
return;
}
}
// Collect any remaining elements.
// This is a zero-length vector which does not allocate if `lower_bound` was exact.
let mut collected = self
.replace_with
.by_ref()
.collect::<Vec<I::Item>>()
.into_iter();
// Now we have an exact count.
if collected.len() > 0 {
self.drain.move_tail(collected.len());
let filled = self.drain.fill(&mut collected);
debug_assert!(filled);
debug_assert_eq!(collected.len(), 0);
}
}
// Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
}
}
/// Private helper methods for `Splice::drop`
impl<T, A: Allocator> Drain<'_, T, A> {
/// The range from `self.vec.len` to `self.tail_start` contains elements
/// that have been moved out.
/// Fill that range as much as possible with new elements from the `replace_with` iterator.
/// Returns `true` if we filled the entire range. (`replace_with.next()` didn't return `None`.)
#[inline(always)]
unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
let vec = unsafe { self.vec.as_mut() };
let range_start = vec.len;
let range_end = self.tail_start;
let range_slice = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
};
for place in range_slice {
if let Some(new_item) = replace_with.next() {
unsafe { ptr::write(place, new_item) };
vec.len += 1;
} else {
return false;
}
}
true
}
/// Makes room for inserting more elements before the tail.
#[inline(always)]
unsafe fn move_tail(&mut self, additional: usize) {
let vec = unsafe { self.vec.as_mut() };
let len = self.tail_start + self.tail_len;
vec.buf.reserve(len, additional);
let new_tail_start = self.tail_start + additional;
unsafe {
let src = vec.as_ptr().add(self.tail_start);
let dst = vec.as_mut_ptr().add(new_tail_start);
ptr::copy(src, dst, self.tail_len);
}
self.tail_start = new_tail_start;
}
}
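// --- Illustrative example (not part of the vendored crate) ---
// std's `Vec::splice` follows the same protocol as the `Splice` drop logic
// above: drain the range, fill it from the replacement iterator, and move the
// tail when more room is needed.
fn main() {
    let mut v = vec![0, 1, 2, 3, 4];
    // Replace the two elements at indices 1..3 with three new ones.
    let removed: Vec<i32> = v.splice(1..3, [10, 20, 30]).collect();
    assert_eq!(removed, [1, 2]);
    assert_eq!(v, [0, 10, 20, 30, 3, 4]);
}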