author    brian m. carlson <sandals@crustytoothpaste.net>    2023-01-31 19:04:22 +0000
committer GitHub <noreply@github.com>    2023-01-31 20:04:22 +0100
commit 05e9d5cab95e5ce1d57dd498a9567639de50f841 (patch)
tree 5b9b541ab3026241be0f0b055bd3c001c7bd95cf /src
parent f15bba3375f7e75f412b085f75b3112cceaa6ef1 (diff)
Avoid large reallocations when freezing BytesMut (#592)
When we freeze a BytesMut, we turn it into a Vec, and then convert that to a Bytes. Currently, this happens using Vec::into_boxed_slice, which reallocates to a slice of the same length as the Vec if the length and the capacity are not equal. This can pose a performance problem if the Vec is large or if this happens many times in a loop.

Instead, let's compare the length and capacity, and if they're the same, continue to handle this using into_boxed_slice. Otherwise, since we have a kind of vtable which can handle a separate capacity, the shared vtable, let's turn our Vec into that kind of Bytes. While this does not avoid allocation altogether, it performs only a small fixed-size allocation (the Shared header) and avoids any need to memcpy the buffer contents.
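As a minimal sketch (not part of the patch), the reallocation the old path incurs can be observed directly: into_boxed_slice must shrink the allocation whenever length and capacity differ, and for a large Vec the allocator will typically move the data to do so.

    fn main() {
        // Sketch only: a Vec with far more capacity than length, as produced
        // by freezing a partially filled buffer.
        let mut vec: Vec<u8> = Vec::with_capacity(1024);
        vec.extend_from_slice(b"hello");
        assert_ne!(vec.len(), vec.capacity());

        let before = vec.as_ptr();
        let boxed: Box<[u8]> = vec.into_boxed_slice(); // shrinks cap 1024 -> len 5
        let after = boxed.as_ptr();

        // The allocator may move the buffer while shrinking; that move is the
        // memcpy this commit avoids for the len != cap case.
        println!("buffer moved: {}", before != after);
    }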
Diffstat (limited to 'src')
-rw-r--r-- src/bytes.rs | 32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/src/bytes.rs b/src/bytes.rs
index b4745a9..0404a72 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -807,8 +807,36 @@ impl From<&'static str> for Bytes {
 impl From<Vec<u8>> for Bytes {
     fn from(vec: Vec<u8>) -> Bytes {
-        let slice = vec.into_boxed_slice();
-        slice.into()
+        let mut vec = vec;
+        let ptr = vec.as_mut_ptr();
+        let len = vec.len();
+        let cap = vec.capacity();
+
+        // Avoid an extra allocation if possible.
+        if len == cap {
+            return Bytes::from(vec.into_boxed_slice());
+        }
+
+        let shared = Box::new(Shared {
+            buf: ptr,
+            cap,
+            ref_cnt: AtomicUsize::new(1),
+        });
+        mem::forget(vec);
+
+        let shared = Box::into_raw(shared);
+        // The pointer should be aligned, so this assert should
+        // always succeed.
+        debug_assert!(
+            0 == (shared as usize & KIND_MASK),
+            "internal: Box<Shared> should have an aligned pointer",
+        );
+        Bytes {
+            ptr,
+            len,
+            data: AtomicPtr::new(shared as _),
+            vtable: &SHARED_VTABLE,
+        }
     }
 }
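As a usage sketch against the crate's public API (assuming this patch is applied; the buffer sizes are illustrative), the fast path triggers whenever a buffer with spare capacity is frozen:

    use bytes::{Bytes, BytesMut};

    fn main() {
        // 64 KiB of capacity, only 6 bytes of length: previously freeze()
        // went through Vec::into_boxed_slice and could reallocate and copy.
        let mut buf = BytesMut::with_capacity(64 * 1024);
        buf.extend_from_slice(b"header");

        // With this patch, the len != cap case allocates only the fixed-size
        // Shared header and hands the existing buffer over without a memcpy.
        let frozen: Bytes = buf.freeze();
        assert_eq!(&frozen[..], b"header");
    }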