Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 110 additions & 14 deletions src/bytes_mut.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::try_alloc::{
AllocStrategy, FallibleAllocStrategy, PanickingAllocStrategy, TryReserveError,
};
use crate::{Buf, BufMut, Bytes, TryGetError};

/// A unique reference to a contiguous slice of memory.
Expand Down Expand Up @@ -599,12 +602,99 @@ impl BytesMut {
}

// will always succeed
let _ = self.reserve_inner(additional, true);
let _ = self.reserve_inner(additional, true, PanickingAllocStrategy);
}

/// Reserves capacity for at least `additional` more bytes to be inserted
/// into the given `BytesMut`, returning an error instead of panicking if
/// the reservation cannot be satisfied.
///
/// More than `additional` bytes may be reserved in order to avoid frequent
/// reallocations. A call to `try_reserve` may result in an allocation.
///
/// Before allocating new buffer space, the function will attempt to reclaim
/// space in the existing buffer. If the current handle references a view
/// into a larger original buffer, and all other handles referencing part
/// of the same original buffer have been dropped, then the current view
/// can be copied/shifted to the front of the buffer and the handle can take
/// ownership of the full buffer, provided that the full buffer is large
/// enough to fit the requested additional capacity.
///
/// This optimization will only happen if shifting the data from the current
/// view to the front of the buffer is not too expensive in terms of the
/// (amortized) time required. The precise condition is subject to change;
/// as of now, the length of the data being shifted needs to be at least as
/// large as the distance that it's shifted by. If the current view is empty
/// and the original buffer is large enough to fit the requested additional
/// capacity, then reallocations will never happen.
///
/// # Examples
///
/// In the following example, a new buffer is allocated.
///
/// ```
/// # fn main() -> Result<(), bytes::TryReserveError> {
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello"[..]);
/// buf.try_reserve(64)?;
/// assert!(buf.capacity() >= 69);
/// # Ok(())
/// # }
/// ```
///
/// In the following example, the existing buffer is reclaimed.
///
/// ```
/// # fn main() -> Result<(), bytes::TryReserveError> {
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(128);
/// buf.put(&[0; 64][..]);
///
/// let ptr = buf.as_ptr();
/// let other = buf.split();
///
/// assert!(buf.is_empty());
/// assert_eq!(buf.capacity(), 64);
///
/// drop(other);
/// buf.try_reserve(128)?;
///
/// assert_eq!(buf.capacity(), 128);
/// assert_eq!(buf.as_ptr(), ptr);
/// # Ok(())
/// # }
/// ```
///
/// # Errors
///
/// Errors if the new capacity overflows `usize` or the underlying allocator fails.
#[inline]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
    // Fast path: the spare capacity already covers the request, so no
    // reclamation or allocation is needed.
    let spare = self.capacity() - self.len();
    if spare >= additional {
        return Ok(());
    }

    // Slow path: reclaim or grow the buffer, surfacing allocator failure
    // and capacity overflow as `Err` rather than panicking.
    self.reserve_inner(additional, true, FallibleAllocStrategy)
        .map(drop)
}

// In separate function to allow the short-circuits in `reserve` and `try_reclaim` to
// be inline-able. Significantly helps performance. Returns false if it did not succeed.
fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool {
fn reserve_inner<S>(
&mut self,
additional: usize,
allocate: bool,
strategy: S,
) -> Result<bool, S::Err>
where
S: AllocStrategy,
{
let len = self.len();
let kind = self.kind();

Expand Down Expand Up @@ -650,21 +740,21 @@ impl BytesMut {
self.cap += off;
} else {
if !allocate {
return false;
return Ok(false);
}
// Not enough space, or reusing might be too much overhead:
// allocate more space!
let mut v =
ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
strategy.fallible_reserve(&mut v, additional)?;

// Update the info
self.ptr = vptr(v.as_mut_ptr().add(off));
self.cap = v.capacity() - off;
debug_assert_eq!(self.len, v.len() - off);
}

return true;
return Ok(true);
}
}

Expand All @@ -677,8 +767,8 @@ impl BytesMut {
// Compute the new capacity
let mut new_cap = match len.checked_add(additional) {
Some(new_cap) => new_cap,
None if !allocate => return false,
None => panic!("overflow"),
None if !allocate => return Ok(false),
None => return Err(strategy.capacity_overflow()),
};

unsafe {
Expand Down Expand Up @@ -711,7 +801,7 @@ impl BytesMut {
self.cap = v.capacity();
} else {
if !allocate {
return false;
return Ok(false);
}
// calculate offset
let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
Expand All @@ -720,7 +810,9 @@ impl BytesMut {
// `Vec`, so it does not take the offset into account.
//
// Thus we have to manually add it here.
new_cap = new_cap.checked_add(off).expect("overflow");
new_cap = new_cap
.checked_add(off)
.ok_or_else(|| strategy.capacity_overflow())?;

// The vector capacity is not sufficient. The reserve request is
// asking for more than the initial buffer capacity. Allocate more
Expand All @@ -744,18 +836,19 @@ impl BytesMut {
// care about in the unused capacity before calling `reserve`.
debug_assert!(off + len <= v.capacity());
v.set_len(off + len);
v.reserve(new_cap - v.len());
let additional = new_cap - v.len();
strategy.fallible_reserve(v, additional)?;

// Update the info
self.ptr = vptr(v.as_mut_ptr().add(off));
self.cap = v.capacity() - off;
}

return true;
return Ok(true);
}
}
if !allocate {
return false;
return Ok(false);
}

let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
Expand All @@ -779,7 +872,7 @@ impl BytesMut {
self.ptr = vptr(v.as_mut_ptr());
self.cap = v.capacity();
debug_assert_eq!(self.len, v.len());
true
Ok(true)
}

/// Attempts to cheaply reclaim already allocated capacity for at least `additional` more
Expand Down Expand Up @@ -839,7 +932,10 @@ impl BytesMut {
return true;
}

self.reserve_inner(additional, false)
match self.reserve_inner(additional, false, PanickingAllocStrategy) {
Ok(reclaimed) => reclaimed,
Err(never) => match never {},
}
}

/// Appends given bytes to this `BytesMut`.
Expand Down
2 changes: 2 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,10 @@ mod bytes;
mod bytes_mut;
mod fmt;
mod loom;
mod try_alloc;
pub use crate::bytes::Bytes;
pub use crate::bytes_mut::BytesMut;
pub use crate::try_alloc::TryReserveError;

// Optional Serde support
#[cfg(feature = "serde")]
Expand Down
76 changes: 76 additions & 0 deletions src/try_alloc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
use alloc::vec::Vec;
use core::{
convert::Infallible,
fmt::{self, Display},
};
#[cfg(feature = "std")]
use std::error::Error;

/// The error type for `try_reserve` methods.
///
/// Wraps a private inner enum so callers only see an opaque error; the
/// concrete cause (allocator failure vs. capacity overflow) is reported
/// through the `Display` implementation.
#[derive(Debug)]
pub struct TryReserveError(TryReserveErrorInner);

// Private cause of a `TryReserveError`; kept out of the public API so the
// representation can change without a breaking release.
#[derive(Debug)]
pub(crate) enum TryReserveErrorInner {
    // The allocator reported failure via `Vec::try_reserve`.
    Std(alloc::collections::TryReserveError),
    // The requested capacity computation overflowed `usize` before any
    // allocation was attempted.
    Overflow,
}

impl Display for TryReserveError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.0 {
TryReserveErrorInner::Std(err) => Display::fmt(err, f),
TryReserveErrorInner::Overflow => f.write_str("memory allocation failed because the computed capacity exceeded the collection's maximum"),
}
}
}

// `std::error::Error` only exists with the `std` feature; the required
// `Debug` and `Display` supertraits are implemented unconditionally above.
#[cfg(feature = "std")]
impl Error for TryReserveError {}

/// The allocation strategy: selects between panicking and fallible `Vec`
/// growth so `reserve`-style code paths can share one implementation.
///
/// # Safety
///
/// `fallible_reserve` must behave the same as `Vec::reserve` or
/// `Vec::try_reserve`: when it returns `Ok(())`, the vector must have at
/// least `additional` elements of spare capacity, with existing contents
/// untouched. Unsafe callers rely on this guarantee when writing into the
/// reserved region afterwards.
pub(crate) unsafe trait AllocStrategy {
    /// Error produced when a reservation cannot be satisfied
    /// (`Infallible` for the panicking strategy).
    type Err;

    /// Ensures `vec` can hold at least `additional` more elements,
    /// reporting failure through `Self::Err` or by panicking, depending
    /// on the implementation.
    fn fallible_reserve<T>(&self, vec: &mut Vec<T>, additional: usize) -> Result<(), Self::Err>;

    /// Produces the error (or panics) used when a capacity computation
    /// overflows `usize` before any allocation is attempted.
    #[track_caller]
    fn capacity_overflow(&self) -> Self::Err;
}

// Strategy for the infallible `reserve` path: delegates to `Vec::reserve`,
// which handles failure itself, so the error type is uninhabited.
pub(crate) struct PanickingAllocStrategy;

// SAFETY: `Vec::reserve` is exactly the behavior the trait contract allows.
unsafe impl AllocStrategy for PanickingAllocStrategy {
    type Err = Infallible;

    fn fallible_reserve<T>(&self, buf: &mut Vec<T>, extra: usize) -> Result<(), Self::Err> {
        buf.reserve(extra);
        Ok(())
    }

    #[track_caller]
    fn capacity_overflow(&self) -> Self::Err {
        panic!("overflow")
    }
}

// Strategy for the `try_reserve` path: never panics, converting both
// allocator failure and capacity overflow into `TryReserveError`.
pub(crate) struct FallibleAllocStrategy;

// SAFETY: `Vec::try_reserve` is exactly the behavior the trait contract
// allows; `Ok(())` guarantees the requested spare capacity.
unsafe impl AllocStrategy for FallibleAllocStrategy {
    type Err = TryReserveError;

    fn fallible_reserve<T>(&self, buf: &mut Vec<T>, extra: usize) -> Result<(), Self::Err> {
        match buf.try_reserve(extra) {
            Ok(()) => Ok(()),
            Err(err) => Err(TryReserveError(TryReserveErrorInner::Std(err))),
        }
    }

    #[track_caller]
    fn capacity_overflow(&self) -> Self::Err {
        TryReserveError(TryReserveErrorInner::Overflow)
    }
}
12 changes: 12 additions & 0 deletions tests/test_bytes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1028,6 +1028,18 @@ fn bytes_reserve_overflow() {
bytes.reserve(usize::MAX);
}

#[test]
fn bytes_try_reserve_overflow() {
    // Requesting `usize::MAX` additional bytes must overflow the computed
    // capacity; `try_reserve` has to report this as an error, not panic.
    let mut bytes = BytesMut::with_capacity(1024);
    bytes.put_slice(b"hello world");

    let err = bytes.try_reserve(usize::MAX).unwrap_err();
    assert_eq!(
        err.to_string(),
        "memory allocation failed because the computed capacity exceeded the collection's maximum",
    );
}

#[test]
fn bytes_with_capacity_but_empty() {
// See https://github.com/tokio-rs/bytes/issues/340
Expand Down
Loading