author Sean McArthur <sean@seanmonstar.com> 2019-06-06 14:05:16 -0700
committer Sean McArthur <sean@seanmonstar.com> 2019-06-06 14:08:29 -0700
commit 5759211ff81c3601015a0c0dad96f9366965446c (patch)
tree f35a46ed391a2dedf99e0e4ff5181a559ffdf50c /src/bytes.rs
parent 60aceba2bb1dfacbca395832d2f36eabd517fcb1 (diff)
parent d43e283e5ed520e54df2428f2cf9a7c13c79ff49 (diff)
Merge branch 'v0.4.x' into uplift-0.4-commits
Diffstat (limited to 'src/bytes.rs')
-rw-r--r-- src/bytes.rs | 120
1 file changed, 97 insertions(+), 23 deletions(-)
diff --git a/src/bytes.rs b/src/bytes.rs
index 7fc3536..3343741 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -273,7 +273,7 @@ pub struct BytesMut {
// The rest of `arc`'s bytes are used as part of the inline buffer, which means
// that those bytes need to be located next to the `ptr`, `len`, and `cap`
// fields, which make up the rest of the inline buffer. This requires special
-// casing the layout of `Inner` depending on if the target platform is bit or
+// casing the layout of `Inner` depending on if the target platform is big or
// little endian.
//
// On little endian platforms, the `arc` field must be the first field in the
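A minimal sketch of the technique this comment describes: pinning a field to whichever end of a struct holds the word's low-order byte, via `cfg(target_endian)`. This is an editor's illustration only, not the crate's actual `Inner` definition; the struct and field names here are hypothetical.

```
// Hypothetical illustration of endian-dependent field ordering.
// On little endian the low-order byte of a word sits at the lowest
// address, so the tag field must come first; on big endian it sits
// at the highest address, so the tag field must come last.
#[cfg(target_endian = "little")]
#[repr(C)]
struct InlineLayout {
    tag: u8,       // overlaps the low byte of the pointer-sized word
    buf: [u8; 7],  // remaining bytes serve as inline storage
}

#[cfg(target_endian = "big")]
#[repr(C)]
struct InlineLayout {
    buf: [u8; 7],
    tag: u8,       // low-order byte is at the end on big endian
}
```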
@@ -590,6 +590,46 @@ impl Bytes {
self.slice(0, end)
}
+ /// Returns a slice of self that is equivalent to the given `subset`.
+ ///
+ /// When processing a `Bytes` buffer with other tools, one often gets a
+ /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
+ /// This function turns that `&[u8]` into another `Bytes`, as if one had
+ /// called `self.slice()` with the offsets that correspond to `subset`.
+ ///
+ /// This operation is `O(1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let bytes = Bytes::from(&b"012345678"[..]);
+ /// let as_slice = bytes.as_ref();
+ /// let subset = &as_slice[2..6];
+ /// let subslice = bytes.slice_ref(subset);
+ /// assert_eq!(&subslice[..], b"2345");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Requires that the given `subset` slice is in fact contained within the
+ /// `Bytes` buffer; otherwise this function will panic.
+ pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
+ let bytes_p = self.as_ptr() as usize;
+ let bytes_len = self.len();
+
+ let sub_p = subset.as_ptr() as usize;
+ let sub_len = subset.len();
+
+ assert!(sub_p >= bytes_p);
+ assert!(sub_p + sub_len <= bytes_p + bytes_len);
+
+ let sub_offset = sub_p - bytes_p;
+
+ self.slice(sub_offset, sub_offset + sub_len)
+ }
+
/// Splits the bytes into two at the given index.
///
/// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
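A short usage sketch for the new `slice_ref` (an editor's illustration, not part of this diff), showing the round trip from `Bytes` to a plain `&[u8]` and back without copying:

```
use bytes::Bytes;

fn main() {
    let bytes = Bytes::from(&b"hello world"[..]);

    // A plain &[u8] borrowed out of `bytes`...
    let world: &[u8] = &bytes[6..];

    // ...can be turned back into an owned, reference-counted handle
    // in O(1), as if bytes.slice(6, 11) had been called.
    let owned = bytes.slice_ref(world);
    assert_eq!(&owned[..], b"world");

    // This would panic: b"world" does not point into `bytes`' buffer.
    // let _ = bytes.slice_ref(&b"world"[..]);
}
```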
@@ -946,6 +986,18 @@ impl FromIterator<u8> for Bytes {
}
}
+impl<'a> FromIterator<&'a u8> for BytesMut {
+ fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+ BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
+ }
+}
+
+impl<'a> FromIterator<&'a u8> for Bytes {
+ fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+ BytesMut::from_iter(into_iter).freeze()
+ }
+}
+
impl PartialEq for Bytes {
fn eq(&self, other: &Bytes) -> bool {
self.inner.as_ref() == other.inner.as_ref()
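With the two new impls, an iterator of borrowed bytes can be collected directly into either type; a small usage sketch (editor's illustration, not part of this diff):

```
use bytes::{Bytes, BytesMut};

fn main() {
    let src = [b'h', b'i'];

    // Collect from an iterator of &u8; the impls copy each byte out.
    let m: BytesMut = src.iter().collect();
    let b: Bytes = src.iter().collect();

    assert_eq!(&m[..], b"hi");
    assert_eq!(&b[..], b"hi");
}
```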
@@ -1287,6 +1339,8 @@ impl BytesMut {
///
/// Panics if `at > len`.
pub fn split_to(&mut self, at: usize) -> BytesMut {
+ assert!(at <= self.len());
+
BytesMut {
inner: self.inner.split_to(at),
}
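The new assert makes the documented `at > len` panic unconditional up front, rather than relying on the inner representation to catch the out-of-range index. A usage sketch (editor's illustration, not part of this diff):

```
use bytes::BytesMut;

fn main() {
    let mut buf = BytesMut::from(&b"header:body"[..]);

    // Split off the first seven bytes; `buf` keeps the remainder.
    let head = buf.split_to(7);
    assert_eq!(&head[..], b"header:");
    assert_eq!(&buf[..], b"body");

    // buf.split_to(100) would panic here, since 100 > buf.len().
}
```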
@@ -2474,6 +2528,10 @@ impl Inner {
// bits, so even without any explicit atomic operations, reading the
// flag will be correct.
//
+ // This is undefined behavior due to a data race, but experimental
+ // evidence shows that it works in practice (discussion:
+ // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
+ //
// This function is very critical performance wise as it is called for
// every operation. Performing an atomic load would mess with the
// compiler's ability to optimize. Simple benchmarks show up to a 10%
@@ -2483,7 +2541,7 @@ impl Inner {
#[inline]
fn imp(arc: &AtomicPtr<Shared>) -> usize {
unsafe {
- let p: &u8 = mem::transmute(arc);
+ let p: *const u8 = mem::transmute(arc);
(*p as usize) & KIND_MASK
}
}
@@ -2492,7 +2550,7 @@ impl Inner {
#[inline]
fn imp(arc: &AtomicPtr<Shared>) -> usize {
unsafe {
- let p: &usize = mem::transmute(arc);
+ let p: *const usize = mem::transmute(arc);
*p & KIND_MASK
}
}
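In both cfg variants (and in the `uncoordinated_get` hunk that follows), the change is from transmuting `&AtomicPtr<Shared>` into a reference of another type (`&u8` / `&usize`) to transmuting it into a raw pointer, so no reference to the wrong type is ever materialized for the unsynchronized read. A standalone sketch of the pattern (editor's illustration only; the `KIND_MASK` value is assumed):

```
use std::sync::atomic::AtomicPtr;

const KIND_MASK: usize = 0b11; // assumed flag mask, for illustration

// Read the kind flag from the low bits of the pointer-sized word
// without an atomic load, by casting the reference to a raw pointer
// and dereferencing it directly.
fn kind(arc: &AtomicPtr<u8>) -> usize {
    unsafe {
        let p = arc as *const AtomicPtr<u8> as *const usize;
        *p & KIND_MASK
    }
}

fn main() {
    let arc = AtomicPtr::new(std::ptr::null_mut::<u8>());
    assert_eq!(kind(&arc), 0);
}
```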
@@ -2508,7 +2566,7 @@ impl Inner {
// function.
let prev = unsafe {
let p: &AtomicPtr<Shared> = &self.arc;
- let p: &usize = mem::transmute(p);
+ let p: *const usize = mem::transmute(p);
*p
};
@@ -2615,35 +2673,51 @@ fn original_capacity_from_repr(repr: usize) -> usize {
#[test]
fn test_original_capacity_to_repr() {
- for &cap in &[0, 1, 16, 1000] {
- assert_eq!(0, original_capacity_to_repr(cap));
- }
+ assert_eq!(original_capacity_to_repr(0), 0);
- for &cap in &[1024, 1025, 1100, 2000, 2047] {
- assert_eq!(1, original_capacity_to_repr(cap));
- }
+ let max_width = 32;
- for &cap in &[2048, 2049] {
- assert_eq!(2, original_capacity_to_repr(cap));
- }
+ for width in 1..(max_width + 1) {
+ let cap = 1 << (width - 1);
+
+ let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
+ 0
+ } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
+ width - MIN_ORIGINAL_CAPACITY_WIDTH
+ } else {
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
+ };
- // TODO: more
+ assert_eq!(original_capacity_to_repr(cap), expected);
- for &cap in &[65536, 65537, 68000, 1 << 17, 1 << 18, 1 << 20, 1 << 30] {
- assert_eq!(7, original_capacity_to_repr(cap), "cap={}", cap);
+ if width > 1 {
+ assert_eq!(original_capacity_to_repr(cap + 1), expected);
+ }
+
+ // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
+ if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
+ assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap + 76), expected);
+ } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
+ assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
+ }
}
}
#[test]
fn test_original_capacity_from_repr() {
assert_eq!(0, original_capacity_from_repr(0));
- assert_eq!(1024, original_capacity_from_repr(1));
- assert_eq!(1024 * 2, original_capacity_from_repr(2));
- assert_eq!(1024 * 4, original_capacity_from_repr(3));
- assert_eq!(1024 * 8, original_capacity_from_repr(4));
- assert_eq!(1024 * 16, original_capacity_from_repr(5));
- assert_eq!(1024 * 32, original_capacity_from_repr(6));
- assert_eq!(1024 * 64, original_capacity_from_repr(7));
+
+ let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
+
+ assert_eq!(min_cap, original_capacity_from_repr(1));
+ assert_eq!(min_cap * 2, original_capacity_from_repr(2));
+ assert_eq!(min_cap * 4, original_capacity_from_repr(3));
+ assert_eq!(min_cap * 8, original_capacity_from_repr(4));
+ assert_eq!(min_cap * 16, original_capacity_from_repr(5));
+ assert_eq!(min_cap * 32, original_capacity_from_repr(6));
+ assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
unsafe impl Send for Inner {}
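For reference, a plausible reconstruction of the two functions these rewritten tests pin down, inferred from the assertions above rather than copied from the crate; the width constants are assumed values (min_cap = 1024 implies MIN_ORIGINAL_CAPACITY_WIDTH = 10, and the repr saturating at 7 implies MAX_ORIGINAL_CAPACITY_WIDTH = 17):

```
use std::cmp;
use std::mem;

const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; // assumed: 1 << 10 == 1024
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; // assumed: repr caps at 7

// Clamp the capacity's bit width into a small repr: 0 below 1024,
// then one step per power of two, saturating at 7.
fn original_capacity_to_repr(cap: usize) -> usize {
    let width = mem::size_of::<usize>() * 8
        - (cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize;
    cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
}

// Invert the mapping back to the smallest capacity in each bucket.
fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        0
    } else {
        1 << (MIN_ORIGINAL_CAPACITY_WIDTH + repr - 1)
    }
}

fn main() {
    assert_eq!(original_capacity_to_repr(1000), 0);
    assert_eq!(original_capacity_to_repr(1025), 1);
    assert_eq!(original_capacity_to_repr(1 << 30), 7);
    assert_eq!(original_capacity_from_repr(1), 1024);
    assert_eq!(original_capacity_from_repr(7), 1024 * 64);
}
```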