bytes.rs

use core::iter::FromIterator;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{offset_from, Buf, BytesMut};

/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is an efficient container for storing and operating on contiguous
/// slices of memory. It is intended for use primarily in networking code, but
/// could have applications elsewhere as well.
///
/// `Bytes` values facilitate zero-copy network programming by allowing multiple
/// `Bytes` objects to point to the same underlying memory.
///
/// `Bytes` does not have a single implementation. It is an interface, whose
/// exact behavior is implemented through dynamic dispatch in several underlying
/// implementations of `Bytes`.
///
/// All `Bytes` implementations must fulfill the following requirements:
/// - They are cheaply cloneable and thereby shareable between an unlimited amount
///   of components, for example by modifying a reference count.
/// - Instances can be sliced to refer to a subset of the original buffer.
///
/// ```
/// use bytes::Bytes;
///
/// let mut mem = Bytes::from("Hello world");
/// let a = mem.slice(0..5);
///
/// assert_eq!(a, "Hello");
///
/// let b = mem.split_to(6);
///
/// assert_eq!(mem, "world");
/// assert_eq!(b, "Hello ");
/// ```
///
/// # Memory layout
///
/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
/// to track information about which segment of the underlying memory the
/// `Bytes` handle has access to.
///
/// `Bytes` keeps both a pointer to the shared state containing the full memory
/// slice and a pointer to the start of the region visible by the handle.
/// `Bytes` also tracks the length of its view into the memory.
///
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it behind multiple `Bytes`
/// instances.
///
/// For `Bytes` implementations which refer to constant memory (e.g. created
/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
///
/// For `Bytes` implementations which point to a reference counted shared storage
/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
/// reference count.
///
/// Due to this mechanism, multiple `Bytes` instances may point to the same
/// shared memory region.
/// Each `Bytes` instance can point to different sections within that
/// memory region, and `Bytes` instances may or may not have overlapping views
/// into the memory.
///
/// The following diagram visualizes a scenario where 2 `Bytes` instances make
/// use of an `Arc`-based backing storage, and provide access to different views:
///
/// ```text
///
///    Arc ptrs                   ┌─────────┐
///    ________________________ / │ Bytes 2 │
///   /                           └─────────┘
///  /          ┌───────────┐     |         |
/// |_________/ │  Bytes 1  │     |         |
/// |           └───────────┘     |         |
/// |           |           | ___/ data     | tail
/// |      data |      tail |/              |
/// v           v           v               v
/// ┌─────┬─────┬───────────┬───────────────┬─────┐
/// │ Arc │     │           │               │     │
/// └─────┴─────┴───────────┴───────────────┴─────┘
/// ```
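///
/// A short, illustrative sketch (using only the public API shown in this
/// module): cloning a heap-backed `Bytes` is cheap, and both handles end up
/// viewing the same underlying allocation.
///
/// ```
/// use bytes::Bytes;
///
/// let a = Bytes::from(b"hello world".to_vec());
/// let b = a.clone();
///
/// // Both handles point at the same buffer; no bytes were copied.
/// assert_eq!(a.as_ptr(), b.as_ptr());
/// assert_eq!(a, b);
/// ```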
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    // inlined "trait object"
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    /// fn(data, ptr, len)
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len)
    ///
    /// takes `Bytes` to value
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    /// fn(data)
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    /// fn(data, ptr, len)
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        // Make it a named const to work around
        // "unsizing casts are not allowed in const fn"
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new empty `Bytes`.
    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` will point directly to the static slice. There is
    /// no allocating or copying.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Creates a new `Bytes` from a static slice.
    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Creates a new `Bytes` with length zero and the given pointer as the address.
    fn new_empty_with_ptr(ptr: *const u8) -> Self {
        debug_assert!(!ptr.is_null());

        // Detach this pointer's provenance from whichever allocation it came from, and reattach it
        // to the provenance of the fake ZST [u8;0] at the same address.
        let ptr = without_provenance(ptr as usize);

        Bytes {
            ptr,
            len: 0,
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns true if this is the only reference to the data.
    ///
    /// Always returns false if the data is backed by a static slice.
    ///
    /// The result of this method may be invalidated immediately if another
    /// thread clones this value while this is being called. Ensure you have
    /// unique access to this value (`&mut Bytes`) first if you need to be
    /// certain the result is valid (i.e. for safety reasons).
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(vec![1, 2, 3]);
    /// assert!(a.is_unique());
    /// let b = a.clone();
    /// assert!(!a.is_unique());
    /// ```
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    /// Creates a new `Bytes` instance from the given slice by copying it.
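    ///
    /// # Examples
    ///
    /// A short example of the copying constructor (illustrative only):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```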
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

    /// Returns a slice of self for the provided range.
    ///
    /// This will increment the reference count for the underlying memory and
    /// return a new `Bytes` handle set to the slice.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
    /// will panic.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// When processing a `Bytes` buffer with other tools, one often gets a
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
    /// called `self.slice()` with the offsets that correspond to `subset`.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(&subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        // Empty slice and empty Bytes may have their pointers reset
        // so explicitly allow empty slice to be a subslice of any slice.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
    /// contains elements `[at, len)`. It's guaranteed that the memory does not
    /// move, that is, the address of `self` does not change, and the address of
    /// the returned slice is `at` bytes after that.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
        }

        if at == 0 {
            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            let end_ptr = self.ptr.wrapping_add(at);
            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
        }

        if at == 0 {
            return Bytes::new_empty_with_ptr(self.ptr);
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The Vec "promotable" vtables do not store the capacity,
            // so we cannot truncate while using this repr. We *have* to
            // promote using `split_off` so the capacity can be stored.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Try to convert self into `BytesMut`.
    ///
    /// If `self` is unique for the entire original buffer, this will succeed
    /// and return a `BytesMut` with the contents of `self` without copying.
    /// If `self` is not unique for the entire original buffer, this will fail
    /// and return self.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
    /// ```
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    // private

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        // should already be asserted, but debug assert for tests
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

// Vtable must enforce this behavior
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

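    /// An illustrative example (a sketch using the public `Buf` API): for
    /// `Bytes`, this hands the first `len` bytes back as a new `Bytes`
    /// without copying.
    ///
    /// ```
    /// use bytes::{Buf, Bytes};
    ///
    /// let mut buf = Bytes::from_static(b"hello world");
    /// let prefix = buf.copy_to_bytes(5);
    ///
    /// assert_eq!(&prefix[..], b"hello");
    /// assert_eq!(&buf[..], b" world");
    /// ```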
    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
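    /// A small illustrative example: collecting an iterator of `u8` builds a
    /// `Bytes` by first collecting into a `Vec<u8>`.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b: Bytes = (0u8..4).collect();
    /// assert_eq!(&b[..], &[0, 1, 2, 3][..]);
    /// ```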
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

// impl Eq

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

// impl From

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
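    /// An illustrative example: wrapping a `'static` byte slice does not
    /// allocate or copy.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"static bytes"[..]);
    /// assert_eq!(&b[..], b"static bytes");
    /// ```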
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
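    /// An illustrative example: a `'static` string slice is wrapped without
    /// copying, just like a `'static` byte slice.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from("hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```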
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
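    /// An illustrative example: the vector's buffer is taken over, so no byte
    /// copy is needed.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(vec![1u8, 2, 3]);
    /// assert_eq!(&b[..], &[1, 2, 3][..]);
    /// ```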
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // Avoid an extra allocation if possible.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

impl From<Box<[u8]>> for Bytes {
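    /// An illustrative example: the boxed slice's allocation is reused
    /// directly.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let boxed: Box<[u8]> = Box::from(&b"hello"[..]);
    /// let b = Bytes::from(boxed);
    /// assert_eq!(&b[..], b"hello");
    /// ```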
    fn from(slice: Box<[u8]>) -> Bytes {
        // Box<[u8]> doesn't contain a heap allocation for empty slices,
        // so the pointer isn't aligned enough for the KIND_VEC stashing to
        // work.
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<Bytes> for BytesMut {
    /// Convert self into `BytesMut`.
    ///
    /// If `bytes` is unique for the entire original buffer, this will return a
    /// `BytesMut` with the contents of `bytes` without copying.
    /// If `bytes` is not unique for the entire original buffer, this will make
    /// a copy of the subset of the original buffer that `bytes` refers to in a new `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
    /// ```
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
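    /// An illustrative example: the `String`'s buffer is reused via
    /// `String::into_bytes`, so no copy is made.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(String::from("hello"));
    /// assert_eq!(&b[..], b"hello");
    /// ```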
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
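    /// An illustrative example: converting back to a `Vec<u8>` typically
    /// reuses the heap allocation when this is the only handle to it, and
    /// copies otherwise.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(vec![1u8, 2, 3]);
    /// let v = Vec::from(b);
    /// assert_eq!(v, vec![1, 2, 3]);
    /// ```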
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

// ===== impl Vtable =====

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

// ===== impl StaticVtable =====

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

// ===== impl PromotableVtable =====

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        // If Bytes holds a Vec, then the offset must be 0.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = offset_from(ptr, buf) + len;

        // Copy back buffer
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        // KIND_VEC is a view of an underlying buffer at a certain offset.
        // The ptr + len always represents the end of that buffer.
        // Before truncating it, it is first promoted to KIND_ARC.
        // Thus, we can safely reconstruct a Vec from it without leaking memory.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

// ===== impl SharedVtable =====

struct Shared {
    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we rely on the allocation of the
// `Shared` object to implicitly carry the `KIND_ARC` flag in its pointer:
// that flag corresponds to the LSB being 0.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
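
// A minimal sketch of the tagging scheme used by the promotable vtables (this
// test is illustrative only and not part of the original module): an aligned
// `Box<Shared>` pointer has its low bit clear (KIND_ARC), while a stashed
// `Vec` buffer pointer carries the low bit set (KIND_VEC).
#[cfg(test)]
#[test]
fn kind_mask_reads_the_low_bit() {
    let arc_like = 0b1000usize; // e.g. an even Box<Shared> address
    let vec_like = arc_like | KIND_VEC; // the same address with the tag bit set
    assert_eq!(arc_like & KIND_MASK, KIND_ARC);
    assert_eq!(vec_like & KIND_MASK, KIND_VEC);
}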

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    // Check that the ref_cnt is 1 (unique).
    //
    // If it is unique, then it is set to 0 with AcqRel fence for the same
    // reason in release_shared.
    //
    // Otherwise, we take the other branch and call release_shared.
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        // Deallocate the `Shared` instance without running its destructor.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Copy back buffer
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    // The goal is to check if the current handle is the only handle
    // that currently has access to the buffer. This is done by
    // checking if the `ref_cnt` is currently 1.
    //
    // The `Acquire` ordering synchronizes with the `Release` as
    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
    // operation guarantees that any mutations done in other threads
    // are ordered before the `ref_cnt` is decremented. As such,
    // this `Acquire` will guarantee that those mutations are
    // visible to the current thread.
    //
    // Otherwise, we take the other branch, copy the data and call `release_shared`.
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        // Deallocate the `Shared` instance without running its destructor.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Rebuild Vec
        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        // Copy the data from Shared in a new Vec, then release it
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    // If the buffer is still tracked in a `Vec<u8>`, it is time to promote
    // the vec to an `Arc`. This could potentially be called concurrently, so
    // some care must be taken.

    // First, allocate a new `Shared` instance containing the
    // `Vec` fields. It's important to note that `ptr`, `len`,
    // and `cap` cannot be mutated without having `&mut self`.
    // This means that these fields will not be concurrently
    // updated and since the buffer hasn't been promoted to an
    // `Arc`, those three fields still are the components of the
    // vector.
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        // Initialize refcount to 2. One for this reference, and one
        // for the new clone that will be returned from
        // `shallow_clone`.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    // The pointer should be aligned, so this assert should
    // always succeed.
    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    // Try compare & swapping the pointer into the `arc` field.
    // `Release` is used to synchronize with other threads that
    // will load the `arc` field.
    //
    // If the `compare_exchange` fails, then the thread lost the
    // race to promote the buffer to shared. The `Acquire`
    // ordering will synchronize with the `compare_exchange`
    // that happened in the other thread and the `Shared`
    // pointed to by `actual` will be visible.
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // The upgrade was successful, the new handle can be
            // returned.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // The upgrade failed, a concurrent clone happened. Release
            // the allocation that was made in this thread, it will not
            // be needed.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            // Buffer already promoted to shared storage, so increment ref
            // count.
            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data.  Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    //
    // Thread sanitizer does not support atomic fences. Use an atomic load
    // instead.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}

// Ideally we would always use this version of `ptr_map` since it is strict
// provenance compatible, but it results in worse codegen. We will however still
// use it on miri because it gives better diagnostics for people who test bytes
// code with miri.
//
// See https://github.com/tokio-rs/bytes/pull/545 for more info.
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn without_provenance(ptr: usize) -> *const u8 {
    core::ptr::null::<u8>().wrapping_add(ptr)
}

// compile-fails

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}