Diffstat (limited to 'src/bytes.rs')
-rw-r--r--  src/bytes.rs  133
1 file changed, 109 insertions, 24 deletions
diff --git a/src/bytes.rs b/src/bytes.rs
index a9aefa9..3343741 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -95,8 +95,8 @@ use std::iter::{FromIterator, Iterator};
/// # Inline bytes
///
/// As an optimization, when the slice referenced by a `Bytes` or `BytesMut`
-/// handle is small enough [^1], `with_capacity` will avoid the allocation by
-/// inlining the slice directly in the handle. In this case, a clone is no
+/// handle is small enough [^1], `with_capacity` will avoid the allocation
+/// by inlining the slice directly in the handle. In this case, a clone is no
/// longer "shallow" and the data will be copied. Converting from a `Vec` will
/// never use inlining.
///
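The inlining rule described above is observable through the `is_inline` accessor this patch introduces further down; a minimal sketch against the patched crate:

```rust
use bytes::Bytes;

// A small capacity is stored directly in the handle: no heap allocation.
let small = Bytes::with_capacity(4);
assert!(small.is_inline());

// Converting from a Vec reuses the Vec's heap buffer and never inlines.
let from_vec = Bytes::from(vec![1u8, 2, 3]);
assert!(!from_vec.is_inline());
```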
@@ -485,6 +485,20 @@ impl Bytes {
self.inner.is_empty()
}
+ /// Returns `true` if the `Bytes` uses inline allocation.
+ ///
+ /// # Examples
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// assert!(Bytes::with_capacity(4).is_inline());
+ /// assert!(!Bytes::from(Vec::with_capacity(4)).is_inline());
+ /// assert!(!Bytes::with_capacity(1024).is_inline());
+ /// ```
+ pub fn is_inline(&self) -> bool {
+ self.inner.is_inline()
+ }
+
/// Returns a slice of self for the index range `[begin..end)`.
///
/// This will increment the reference count for the underlying memory and
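The "clone is no longer shallow" point from the doc comment above can be made concrete by comparing data pointers. This sketch leans on the internal representation (inline data lives in each handle, heap data is shared), so treat the pointer checks as illustrative rather than guaranteed API:

```rust
use bytes::Bytes;

let mut small = Bytes::with_capacity(4);
small.extend_from_slice(b"hi");
let copy = small.clone();
// Inline: each handle carries its own bytes, so the addresses differ.
assert_ne!(small.as_ptr(), copy.as_ptr());

let big = Bytes::from(vec![0u8; 1024]);
let shared = big.clone();
// Heap-backed: both handles point at one refcounted buffer.
assert_eq!(big.as_ptr(), shared.as_ptr());
```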
@@ -791,6 +805,17 @@ impl Bytes {
}
}
+ /// Acquires a mutable reference to the owned form of the data.
+ ///
+ /// Clones the data if it is not already owned.
+ pub fn to_mut(&mut self) -> &mut BytesMut {
+ if !self.inner.is_mut_safe() {
+ let new = Bytes::from(&self[..]);
+ *self = new;
+ }
+ unsafe { &mut *(self as *mut Bytes as *mut BytesMut) }
+ }
+
/// Appends given bytes to this object.
///
/// If this `Bytes` object does not have enough capacity, it is resized first.
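Returning to the `to_mut` accessor added above: it reads like a `Cow::to_mut` analogue, cloning shared storage before exposing it mutably. A hedged usage sketch under that assumption:

```rust
use bytes::Bytes;

let mut b = Bytes::from(vec![b'h', b'e', b'l', b'l', b'o']);
let other = b.clone(); // the buffer is now shared

// to_mut sees the handle is not uniquely owned, clones the data,
// and hands back a mutable view; `other` is left untouched.
b.to_mut()[0] = b'H';
assert_eq!(&b[..], b"Hello");
assert_eq!(&other[..], b"hello");
```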
@@ -832,6 +857,36 @@ impl Bytes {
mem::replace(self, result.freeze());
}
+
+ /// Combines split `Bytes` objects back into a single contiguous object.
+ ///
+ /// If the two objects are not adjacent in memory, the data is copied instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let mut buf = Bytes::with_capacity(64);
+ /// buf.extend_from_slice(b"aaabbbcccddd");
+ ///
+ /// let split = buf.split_off(6);
+ /// assert_eq!(b"aaabbb", &buf[..]);
+ /// assert_eq!(b"cccddd", &split[..]);
+ ///
+ /// buf.unsplit(split);
+ /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+ /// ```
+ pub fn unsplit(&mut self, other: Bytes) {
+ if self.is_empty() {
+ *self = other;
+ return;
+ }
+
+ if let Err(other_inner) = self.inner.try_unsplit(other.inner) {
+ self.extend_from_slice(other_inner.as_ref());
+ }
+ }
}
impl IntoBuf for Bytes {
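When the two handles do not come from one contiguous allocation, `unsplit` takes the copy fallback rather than the pointer merge; for example:

```rust
use bytes::Bytes;

// Two unrelated allocations: the fast pointer-merge cannot apply.
let mut head = Bytes::from(vec![b'a', b'b', b'c']);
let tail = Bytes::from(vec![b'd', b'e', b'f']);

head.unsplit(tail); // falls back to copying `tail` into `head`
assert_eq!(&head[..], b"abcdef");
```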
@@ -881,6 +936,11 @@ impl From<BytesMut> for Bytes {
}
impl From<Vec<u8>> for Bytes {
+ /// Converts a `Vec<u8>` into a `Bytes`.
+ ///
+ /// This constructor may be used to avoid the inlining optimization used by
+ /// `with_capacity`. A `Bytes` constructed this way will always store its
+ /// data on the heap.
fn from(src: Vec<u8>) -> Bytes {
BytesMut::from(src).freeze()
}
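A quick check of that guarantee, using the `is_inline` accessor added earlier in this patch:

```rust
use bytes::Bytes;

// Three bytes would fit inline, but the conversion keeps the Vec's buffer.
let b = Bytes::from(vec![1u8, 2, 3]);
assert!(!b.is_inline());
```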
@@ -1123,7 +1183,21 @@ impl BytesMut {
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
- self.len() == 0
+ self.inner.is_empty()
+ }
+
+ /// Returns `true` if the `BytesMut` uses inline allocation.
+ ///
+ /// # Examples
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// assert!(BytesMut::with_capacity(4).is_inline());
+ /// assert!(!BytesMut::from(Vec::with_capacity(4)).is_inline());
+ /// assert!(!BytesMut::with_capacity(1024).is_inline());
+ /// ```
+ pub fn is_inline(&self) -> bool {
+ self.inner.is_inline()
}
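A handle can also stop being inline over its lifetime. The sketch below assumes only that `reserve` reallocates when the request cannot fit in the handle, which must hold here since 1024 bytes cannot live inline:

```rust
use bytes::BytesMut;

let mut buf = BytesMut::with_capacity(4);
assert!(buf.is_inline());

// 1024 bytes cannot be stored in the handle itself, so this reallocates.
buf.reserve(1024);
assert!(!buf.is_inline());
```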
/// Returns the number of bytes the `BytesMut` can hold without reallocating.
@@ -1487,32 +1561,13 @@ impl BytesMut {
/// assert_eq!(b"aaabbbcccddd", &buf[..]);
/// ```
pub fn unsplit(&mut self, other: BytesMut) {
- let ptr;
-
- if other.is_empty() {
- return;
- }
-
if self.is_empty() {
*self = other;
return;
}
- unsafe {
- ptr = self.inner.ptr.offset(self.inner.len as isize);
- }
- if ptr == other.inner.ptr &&
- self.inner.kind() == KIND_ARC &&
- other.inner.kind() == KIND_ARC
- {
- debug_assert_eq!(self.inner.arc.load(Acquire),
- other.inner.arc.load(Acquire));
- // Contiguous blocks, just combine directly
- self.inner.len += other.inner.len;
- self.inner.cap += other.inner.cap;
- }
- else {
- self.extend_from_slice(&other);
+ if let Err(other_inner) = self.inner.try_unsplit(other.inner) {
+ self.extend_from_slice(other_inner.as_ref());
}
}
}
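The refactor preserves both paths: adjacent halves merge in place via `try_unsplit`, everything else is copied. Merging in the wrong order, for instance, is never contiguous; a sketch:

```rust
use bytes::BytesMut;

let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaabbbcccddd");
let mut tail = buf.split_off(6);

// `buf` does not start where `tail` ends, so try_unsplit fails and
// the contents of `buf` are appended by copying.
tail.unsplit(buf);
assert_eq!(&tail[..], b"cccdddaaabbb");
```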
@@ -1608,6 +1663,11 @@ impl ops::DerefMut for BytesMut {
}
impl From<Vec<u8>> for BytesMut {
+ /// Converts a `Vec<u8>` into a `BytesMut`.
+ ///
+ /// This constructor may be used to avoid the inlining optimization used by
+ /// `with_capacity`. A `BytesMut` constructed this way will always store
+ /// its data on the heap.
fn from(src: Vec<u8>) -> BytesMut {
BytesMut {
inner: Inner::from_vec(src),
@@ -1973,6 +2033,31 @@ impl Inner {
}
}
+ fn try_unsplit(&mut self, other: Inner) -> Result<(), Inner> {
+ let ptr;
+
+ if other.is_empty() {
+ return Ok(());
+ }
+
+ unsafe {
+ ptr = self.ptr.offset(self.len as isize);
+ }
+ if ptr == other.ptr &&
+ self.kind() == KIND_ARC &&
+ other.kind() == KIND_ARC
+ {
+ debug_assert_eq!(self.arc.load(Acquire),
+ other.arc.load(Acquire));
+ // Contiguous blocks, just combine directly
+ self.len += other.len;
+ self.cap += other.cap;
+ Ok(())
+ } else {
+ Err(other)
+ }
+ }
+
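`Inner` is private, so `try_unsplit` cannot be exercised directly, but the contiguity test it performs is plain pointer arithmetic: the first half's one-past-the-end pointer must equal the second half's start. A stand-alone illustration:

```rust
let v = vec![1u8, 2, 3, 4, 5, 6];
let (head, tail) = v.split_at(3);

// Both slices come from one allocation, so head's end is tail's start,
// which is exactly the comparison try_unsplit makes before merging.
let head_end = unsafe { head.as_ptr().offset(head.len() as isize) };
assert_eq!(head_end, tail.as_ptr());
```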
fn resize(&mut self, new_len: usize, value: u8) {
let len = self.len();
if new_len > len {