about summary refs log tree commit diff
diff options
context:
space:
mode:
authorGravatar Sean McArthur <sean@seanmonstar.com> 2020-01-22 16:37:53 -0800
committerGravatar Carl Lerche <me@carllerche.com> 2020-01-22 16:37:53 -0800
commite0eebde9938d03eb60fe97c4a41f464e4c83d414 (patch)
tree60c04481aa9ad492d8b3f7bd425bdc06372fb5ee
parent729bc7c2084a42fda2c62da6933951fa7ac875aa (diff)
downloadbytes-e0eebde9938d03eb60fe97c4a41f464e4c83d414.tar.gz
bytes-e0eebde9938d03eb60fe97c4a41f464e4c83d414.tar.zst
bytes-e0eebde9938d03eb60fe97c4a41f464e4c83d414.zip
Fix Bytes::truncate losing the original Vec's capacity (#361)
-rw-r--r--src/bytes.rs10
-rw-r--r--tests/test_bytes_odd_alloc.rs2
-rw-r--r--tests/test_bytes_vec_alloc.rs75
3 files changed, 85 insertions, 2 deletions
diff --git a/src/bytes.rs b/src/bytes.rs
index 93ab84b..8ecc853 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -418,7 +418,15 @@ impl Bytes {
#[inline]
pub fn truncate(&mut self, len: usize) {
if len < self.len {
- self.len = len;
+ // The Vec "promotable" vtables do not store the capacity,
+ // so we cannot truncate while using this repr. We *have* to
+ // promote using `split_off` so the capacity can be stored.
+ if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE ||
+ self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE {
+ drop(self.split_off(len));
+ } else {
+ self.len = len;
+ }
}
}
diff --git a/tests/test_bytes_odd_alloc.rs b/tests/test_bytes_odd_alloc.rs
index 01e51ea..4ce424b 100644
--- a/tests/test_bytes_odd_alloc.rs
+++ b/tests/test_bytes_odd_alloc.rs
@@ -41,7 +41,7 @@ unsafe impl GlobalAlloc for Odd {
};
System.dealloc(ptr.offset(-1), new_layout);
} else {
- System.alloc(layout);
+ System.dealloc(ptr, layout);
}
}
}
diff --git a/tests/test_bytes_vec_alloc.rs b/tests/test_bytes_vec_alloc.rs
new file mode 100644
index 0000000..dc007cf
--- /dev/null
+++ b/tests/test_bytes_vec_alloc.rs
@@ -0,0 +1,75 @@
+use std::alloc::{GlobalAlloc, Layout, System};
+use std::{mem, ptr};
+
+use bytes::{Buf, Bytes};
+
+#[global_allocator]
+static LEDGER: Ledger = Ledger;
+
+struct Ledger;
+
+const USIZE_SIZE: usize = mem::size_of::<usize>();
+
+unsafe impl GlobalAlloc for Ledger {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ if layout.align() == 1 && layout.size() > 0 {
+ // Allocate extra space to stash a record of
+ // how much space there was.
+ let orig_size = layout.size();
+ let size = orig_size + USIZE_SIZE;
+ let new_layout = match Layout::from_size_align(size, 1) {
+ Ok(layout) => layout,
+ Err(_err) => return ptr::null_mut(),
+ };
+ let ptr = System.alloc(new_layout);
+ if !ptr.is_null() {
+ (ptr as *mut usize).write(orig_size);
+ let ptr = ptr.offset(USIZE_SIZE as isize);
+ ptr
+ } else {
+ ptr
+ }
+ } else {
+ System.alloc(layout)
+ }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ if layout.align() == 1 && layout.size() > 0 {
+ let off_ptr = (ptr as *mut usize).offset(-1);
+ let orig_size = off_ptr.read();
+ if orig_size != layout.size() {
+ panic!("bad dealloc: alloc size was {}, dealloc size is {}", orig_size, layout.size());
+ }
+
+ let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
+ Ok(layout) => layout,
+ Err(_err) => std::process::abort(),
+ };
+ System.dealloc(off_ptr as *mut u8, new_layout);
+ } else {
+ System.dealloc(ptr, layout);
+ }
+ }
+}
+#[test]
+fn test_bytes_advance() {
+ let mut bytes = Bytes::from(vec![10, 20, 30]);
+ bytes.advance(1);
+ drop(bytes);
+}
+
+#[test]
+fn test_bytes_truncate() {
+ let mut bytes = Bytes::from(vec![10, 20, 30]);
+ bytes.truncate(2);
+ drop(bytes);
+}
+
+#[test]
+fn test_bytes_truncate_and_advance() {
+ let mut bytes = Bytes::from(vec![10, 20, 30]);
+ bytes.truncate(2);
+ bytes.advance(1);
+ drop(bytes);
+}