Diffstat (limited to 'src/bytes_mut.rs')
-rw-r--r--  src/bytes_mut.rs  111
1 file changed, 65 insertions(+), 46 deletions(-)
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index e0630cb..5178198 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,18 +1,22 @@
-use core::{cmp, fmt, hash, isize, slice, usize};
+use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
-use core::iter::{FromIterator, Iterator};
+use core::{cmp, fmt, hash, isize, slice, usize};
-use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}};
+use alloc::{
+ borrow::{Borrow, BorrowMut},
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
-use crate::{Bytes, Buf, BufMut};
-use crate::bytes::Vtable;
use crate::buf::IntoIter;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
-
+use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
///
@@ -247,9 +251,7 @@ impl BytesMut {
let len = self.len;
let data = AtomicPtr::new(self.data as _);
mem::forget(self);
- unsafe {
- Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
- }
+ unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
}
}
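
The hunk above only reformats `freeze`, which consumes the `BytesMut` and hands
the same allocation over as an immutable `Bytes`. A minimal usage sketch of the
public API (these methods are the crate's stable surface; version details
hedged):

    use bytes::{BufMut, BytesMut};

    fn main() {
        let mut buf = BytesMut::with_capacity(16);
        buf.put_slice(b"hello");
        let frozen = buf.freeze(); // no copy: same buffer, now immutable
        assert_eq!(&frozen[..], b"hello");
        let alias = frozen.clone(); // Bytes clones share the allocation
        assert_eq!(alias, frozen);
    }
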
@@ -391,7 +393,9 @@ impl BytesMut {
/// [`split_off`]: #method.split_off
pub fn truncate(&mut self, len: usize) {
if len <= self.len() {
- unsafe { self.set_len(len); }
+ unsafe {
+ self.set_len(len);
+ }
}
}
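
`truncate` only ever shrinks: when `len` is at or above the current length it
is a no-op, so the unsafe `set_len` call never grows the buffer. A small sketch
of that contract:

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::from(&b"hello world"[..]);
        buf.truncate(5);
        assert_eq!(&buf[..], b"hello");
        buf.truncate(100); // larger than len: no-op, no panic
        assert_eq!(buf.len(), 5);
    }
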
@@ -572,7 +576,8 @@ impl BytesMut {
self.cap += off;
} else {
// No space - allocate more
- let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+ let mut v =
+ ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
// Update the info
@@ -588,7 +593,6 @@ impl BytesMut {
debug_assert_eq!(kind, KIND_ARC);
let shared: *mut Shared = self.data as _;
-
// Reserving involves abandoning the currently shared buffer and
// allocating a new vector with the requested capacity.
//
@@ -632,9 +636,7 @@ impl BytesMut {
// check.
let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
- new_cap = cmp::max(
- cmp::max(double, new_cap),
- original_capacity);
+ new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
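
The hunks above sit in `reserve`'s slow path: the new capacity is at least the
doubled current one (overflow-checked via `checked_shl`) and at least the
original allocation size, so repeated small reserves amortize. Only
`capacity() >= len() + additional` is the documented contract; this sketch
treats the exact growth factor as an internal detail:

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::with_capacity(64);
        let cap = buf.capacity();
        buf.resize(cap, 0); // fill to capacity
        buf.reserve(1); // buffer is full, so this must reallocate
        assert!(buf.capacity() > cap);
        assert!(buf.capacity() >= buf.len() + 1);
    }
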
@@ -683,14 +685,12 @@ impl BytesMut {
// Reserved above
debug_assert!(dst.len() >= cnt);
- ptr::copy_nonoverlapping(
- extend.as_ptr(),
- dst.as_mut_ptr() as *mut u8,
- cnt);
-
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
}
- unsafe { self.advance_mut(cnt); }
+ unsafe {
+ self.advance_mut(cnt);
+ }
}
/// Absorbs a `BytesMut` that was previously split off.
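
The copy loop in the hunk above (reserve, `copy_nonoverlapping` into the spare
tail, then `advance_mut` to commit the new length) is the body of the
slice-append path; from the caller's side it is simply:

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::new();
        buf.extend_from_slice(b"hello ");
        buf.extend_from_slice(b"world");
        assert_eq!(&buf[..], b"hello world");
    }
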
@@ -755,16 +755,12 @@ impl BytesMut {
#[inline]
fn as_slice(&self) -> &[u8] {
- unsafe {
- slice::from_raw_parts(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
}
#[inline]
fn as_slice_mut(&mut self) -> &mut [u8] {
- unsafe {
- slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
}
unsafe fn set_start(&mut self, start: usize) {
@@ -793,7 +789,7 @@ impl BytesMut {
// on 64 bit systems and will only happen on 32 bit systems
// when shifting past 134,217,727 bytes. As such, we don't
// worry too much about performance here.
- self.promote_to_shared(/*ref_count = */1);
+ self.promote_to_shared(/*ref_count = */ 1);
}
}
@@ -825,10 +821,10 @@ impl BytesMut {
}
let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
- if ptr == other.ptr.as_ptr() &&
- self.kind() == KIND_ARC &&
- other.kind() == KIND_ARC &&
- self.data == other.data
+ if ptr == other.ptr.as_ptr()
+ && self.kind() == KIND_ARC
+ && other.kind() == KIND_ARC
+ && self.data == other.data
{
// Contiguous blocks, just combine directly
self.len += other.len;
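
The rewritten condition is the fast path for rejoining split halves: when
`other` starts exactly where `self` ends and both views point at the same
shared allocation, the lengths are merged with no copy. Round trip via the
public API:

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::from(&b"hello world"[..]);
        let tail = buf.split_off(5); // buf = "hello", tail = " world"
        assert_eq!(&buf[..], b"hello");
        buf.unsplit(tail); // contiguous halves rejoin without copying
        assert_eq!(&buf[..], b"hello world");
    }
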
@@ -889,7 +885,7 @@ impl BytesMut {
increment_shared(self.data);
ptr::read(self)
} else {
- self.promote_to_shared(/*ref_count = */2);
+ self.promote_to_shared(/*ref_count = */ 2);
ptr::read(self)
}
}
@@ -957,7 +953,9 @@ impl Buf for BytesMut {
cnt,
self.remaining(),
);
- unsafe { self.set_start(cnt); }
+ unsafe {
+ self.set_start(cnt);
+ }
}
fn to_bytes(&mut self) -> crate::Bytes {
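
`Buf::advance` for `BytesMut` (the hunk above) just moves the start pointer
forward via `set_start`, consuming bytes from the front:

    use bytes::{Buf, BytesMut};

    fn main() {
        let mut buf = BytesMut::from(&b"hello world"[..]);
        buf.advance(6); // drop the first six bytes from the view
        assert_eq!(&buf[..], b"world");
        // advancing past remaining() trips the assert shown above
    }
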
@@ -974,7 +972,12 @@ impl BufMut for BytesMut {
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
let new_len = self.len() + cnt;
- assert!(new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap);
+ assert!(
+ new_len <= self.cap,
+ "new_len = {}; capacity = {}",
+ new_len,
+ self.cap
+ );
self.len = new_len;
}
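
`advance_mut` is the unsafe commit step of `BufMut`: the caller promises the
next `cnt` bytes were initialized, and the (now multi-line) assert rejects
advancing past capacity. The safe `put_*` helpers wrap it:

    use bytes::{BufMut, BytesMut};

    fn main() {
        let mut buf = BytesMut::with_capacity(4);
        buf.put_u8(1); // writes one byte, then advances the length
        buf.put_u16(0x0203); // big-endian by default
        assert_eq!(&buf[..], &[1, 2, 3]);
    }
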
@@ -989,7 +992,10 @@ impl BufMut for BytesMut {
// Specialize these methods so they can skip checking `remaining_mut`
// and `advance_mut`.
- fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+ fn put<T: crate::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
while src.has_remaining() {
let s = src.bytes();
let l = s.len();
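
The specialized `put` skips the generic `remaining_mut`/`advance_mut`
bookkeeping and appends each chunk of the source `Buf` directly; usage is
unchanged from the default implementation:

    use bytes::{BufMut, BytesMut};

    fn main() {
        let mut dst = BytesMut::with_capacity(16);
        let src = &b"hello"[..]; // &[u8] implements Buf
        dst.put(src); // copies every chunk of src into dst
        assert_eq!(&dst[..], b"hello");
    }
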
@@ -1068,8 +1074,7 @@ impl Ord for BytesMut {
}
}
-impl Eq for BytesMut {
-}
+impl Eq for BytesMut {}
impl Default for BytesMut {
#[inline]
@@ -1079,7 +1084,10 @@ impl Default for BytesMut {
}
impl hash::Hash for BytesMut {
- fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
let s: &[u8] = self.as_ref();
s.hash(state);
}
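
With `Hash` delegating to the underlying byte slice (and `Eq` above),
`BytesMut` hashes like the equivalent `&[u8]` and can key standard
collections:

    use bytes::BytesMut;
    use std::collections::HashMap;

    fn main() {
        let mut counts: HashMap<BytesMut, u32> = HashMap::new();
        counts.insert(BytesMut::from(&b"key"[..]), 1);
        assert_eq!(counts[&BytesMut::from(&b"key"[..])], 1);
    }
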
@@ -1139,7 +1147,10 @@ impl<'a> IntoIterator for &'a BytesMut {
}
impl Extend<u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = u8>,
+ {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
@@ -1156,7 +1167,10 @@ impl Extend<u8> for BytesMut {
}
impl<'a> Extend<&'a u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = &'a u8>,
+ {
self.extend(iter.into_iter().map(|b| *b))
}
}
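
The two `Extend` impls let a `BytesMut` be built from any iterator of `u8` or
`&u8` (the reference version simply copies each byte):

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::new();
        buf.extend(b"abc".iter()); // Extend<&'a u8>
        buf.extend(vec![b'd', b'e']); // Extend<u8>
        assert_eq!(&buf[..], b"abcde");
    }
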
@@ -1234,7 +1248,10 @@ impl Shared {
fn original_capacity_to_repr(cap: usize) -> usize {
let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
- cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
+ cmp::min(
+ width,
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+ )
}
fn original_capacity_from_repr(repr: usize) -> usize {
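
`original_capacity_to_repr` above packs the original allocation size into a
few bits by clamping its bit-width. A standalone sketch of the computation; the
constants are assumed from the surrounding source (MIN_ORIGINAL_CAPACITY_WIDTH
= 10, MAX_ORIGINAL_CAPACITY_WIDTH = 17) and should be treated as illustrative:

    use std::mem;

    const MIN_WIDTH: usize = 10; // assumed MIN_ORIGINAL_CAPACITY_WIDTH
    const MAX_WIDTH: usize = 17; // assumed MAX_ORIGINAL_CAPACITY_WIDTH
    const PTR_WIDTH: usize = mem::size_of::<usize>() * 8;

    // Bit-width of cap above the 1 KiB floor, clamped to fit the repr bits.
    fn repr(cap: usize) -> usize {
        let width = PTR_WIDTH - ((cap >> MIN_WIDTH).leading_zeros() as usize);
        width.min(MAX_WIDTH - MIN_WIDTH)
    }

    fn main() {
        assert_eq!(repr(64), 0); // small capacities collapse to repr 0
        assert_eq!(repr(4096), 3); // 2^12 => width 3
        assert_eq!(repr(1 << 20), 7); // large capacities clamp to the max
    }
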
@@ -1402,7 +1419,8 @@ impl PartialOrd<BytesMut> for String {
}
impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
- where BytesMut: PartialEq<T>
+where
+ BytesMut: PartialEq<T>,
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
@@ -1410,7 +1428,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
- where BytesMut: PartialOrd<T>
+where
+ BytesMut: PartialOrd<T>,
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(*other)
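
These forwarding impls make comparisons against borrowed forms transparent, so
a `BytesMut` compares directly with `&str`, `&[u8]`, and friends:

    use bytes::BytesMut;

    fn main() {
        let buf = BytesMut::from(&b"abc"[..]);
        assert!(buf == "abc"); // PartialEq<str> through the &'a T forwarder
        assert!(buf == &b"abc"[..]);
        assert!(buf < "abd"); // PartialOrd forwards the same way
    }
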
@@ -1528,8 +1547,8 @@ mod fuzz {
use loom::sync::Arc;
use loom::thread;
- use crate::Bytes;
use super::BytesMut;
+ use crate::Bytes;
#[test]
fn bytes_mut_cloning_frozen() {
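
The diff ends inside the loom-based `fuzz` module; the test body itself is not
shown here. For flavor only, a hypothetical loom model in the same spirit (the
test name and body are this note's invention, not the crate's):

    use loom::sync::Arc;
    use loom::thread;
    use super::BytesMut;

    #[test]
    fn frozen_bytes_cloned_across_threads() {
        loom::model(|| {
            let b = Arc::new(BytesMut::from(&b"abc"[..]).freeze());
            let b2 = b.clone();
            let t = thread::spawn(move || {
                let _ = b2.len(); // read the shared, frozen buffer
            });
            drop(b); // loom explores every interleaving of this drop
            t.join().unwrap();
        });
    }
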