Skip to content

Commit d26edd9

Browse files
committed
Various utilities for getting an owning buffer
1 parent 2debb5b commit d26edd9

File tree

5 files changed: +310 additions, −27 deletions

texel/src/buf.rs

Lines changed: 55 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ use alloc::rc::Rc;
88
use alloc::sync::Arc;
99
use alloc::vec::Vec;
1010

11+
use crate::rec::TexelBuffer;
1112
use crate::texel::{constants::MAX, AtomicPart, MaxAligned, MaxAtomic, MaxCell, Texel, MAX_ALIGN};
1213

1314
/// Allocates and manages raw bytes.
@@ -1017,6 +1018,12 @@ impl cmp::PartialEq<[u8]> for cell_buf {
10171018
}
10181019
}
10191020

1021+
impl cmp::PartialEq<cell_buf> for [u8] {
1022+
fn eq(&self, other: &cell_buf) -> bool {
1023+
crate::texels::U8.cell_bytes_eq(other.0.as_slice_of_cells(), self)
1024+
}
1025+
}
1026+
10201027
impl cmp::Eq for cell_buf {}
10211028

10221029
impl cmp::PartialEq for CellBuffer {
@@ -1370,6 +1377,24 @@ impl<'lt, P> AtomicSliceRef<'lt, P> {
13701377
self.texel.load_atomic_slice(*self, data);
13711378
}
13721379

1380+
/// Read all values into a newly allocated vector.
1381+
pub fn to_vec(&self) -> Vec<P> {
1382+
// FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
1383+
// of Texel<P> from that atomic.
1384+
let mut fresh: Vec<P> = (0..self.len()).map(|_| self.texel.zeroed()).collect();
1385+
self.write_to_slice(&mut fresh);
1386+
fresh
1387+
}
1388+
1389+
/// Read all values into a newly allocated texel buffer.
1390+
pub fn to_texel_buffer(&self) -> TexelBuffer<P> {
1391+
// FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
1392+
// of Texel<P> from that atomic.
1393+
let mut fresh = TexelBuffer::new_for_texel(self.texel, self.len());
1394+
self.write_to_slice(&mut fresh);
1395+
fresh
1396+
}
1397+
13731398
#[track_caller]
13741399
pub fn split_at(self, at: usize) -> (Self, Self) {
13751400
let left = self.index(..at);
@@ -1430,13 +1455,21 @@ impl<P> Copy for AtomicRef<'_, P> {}
14301455
///
14311456
/// Note this type also has the invariant that the identified range fits into memory for the given
14321457
/// texel type.
1433-
#[derive(Clone, Copy, Debug)]
1458+
#[derive(Debug)]
14341459
pub struct TexelRange<T> {
14351460
texel: Texel<T>,
14361461
start_per_align: usize,
14371462
end_per_align: usize,
14381463
}
14391464

1465+
impl<T> Clone for TexelRange<T> {
1466+
fn clone(&self) -> Self {
1467+
*self
1468+
}
1469+
}
1470+
1471+
impl<T> Copy for TexelRange<T> {}
1472+
14401473
impl<T> TexelRange<T> {
14411474
/// Create a new range from a texel type and a range (in units of `T`).
14421475
pub fn new(texel: Texel<T>, range: ops::Range<usize>) -> Option<Self> {
@@ -1504,14 +1537,19 @@ impl<T> TexelRange<T> {
15041537
end_per_align: end_byte / texel.align(),
15051538
})
15061539
}
1540+
1541+
/// Intrinsically, all ranges represent an aligned range of bytes.
1542+
fn aligned_byte_range(self) -> ops::Range<usize> {
1543+
let scale = self.texel.align();
1544+
scale * self.start_per_align..scale * self.end_per_align
1545+
}
15071546
}
15081547

15091548
impl<T> core::ops::Index<TexelRange<T>> for buf {
15101549
type Output = [T];
15111550

15121551
fn index(&self, index: TexelRange<T>) -> &Self::Output {
1513-
let scale = index.texel.align();
1514-
let bytes = &self.0[scale * index.start_per_align..scale * index.end_per_align];
1552+
let bytes = &self.0[index.aligned_byte_range()];
15151553
let slice = index.texel.try_to_slice(bytes);
15161554
// We just multiplied the indices by the alignment..
15171555
slice.expect("byte indices validly aligned")
@@ -1520,14 +1558,26 @@ impl<T> core::ops::Index<TexelRange<T>> for buf {
15201558

15211559
impl<T> core::ops::IndexMut<TexelRange<T>> for buf {
15221560
fn index_mut(&mut self, index: TexelRange<T>) -> &mut Self::Output {
1523-
let scale = index.texel.align();
1524-
let bytes = &mut self.0[scale * index.start_per_align..scale * index.end_per_align];
1561+
let bytes = &mut self.0[index.aligned_byte_range()];
15251562
let slice = index.texel.try_to_slice_mut(bytes);
15261563
// We just multiplied the indices by the alignment..
15271564
slice.expect("byte indices validly aligned")
15281565
}
15291566
}
15301567

1568+
impl<T> core::ops::Index<TexelRange<T>> for cell_buf {
1569+
type Output = [cell::Cell<T>];
1570+
1571+
fn index(&self, index: TexelRange<T>) -> &Self::Output {
1572+
let bytes = &self.0.as_slice_of_cells()[index.aligned_byte_range()];
1573+
let slice = index.texel.try_to_cell(bytes);
1574+
// We just multiplied the indices by the alignment..
1575+
slice
1576+
.expect("byte indices validly aligned")
1577+
.as_slice_of_cells()
1578+
}
1579+
}
1580+
15311581
impl Default for &'_ cell_buf {
15321582
fn default() -> Self {
15331583
cell_buf::new(&mut [])

texel/src/image.rs

Lines changed: 87 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -582,11 +582,37 @@ impl<'data, L> ImageRef<'data, L> {
582582
}
583583

584584
/// Copy all bytes to a newly allocated image.
585-
pub fn to_owned(&self) -> Image<L>
586-
where
587-
L: Layout + Clone,
588-
{
589-
Image::with_bytes(self.inner.layout().clone(), self.inner.as_bytes())
585+
///
586+
/// Note this will allocate a buffer according to the capacity length of this reference, not
587+
/// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
588+
/// or [`Self::truncate_layout`] respectively.
589+
///
590+
/// # Examples
591+
///
592+
/// Here we make an independent copy of the second plane of a composite image.
593+
///
594+
/// ```
595+
/// use image_texel::image::{Image, ImageRef};
596+
/// use image_texel::layout::{PlaneMatrices, Matrix};
597+
/// use image_texel::texels::U8;
598+
///
599+
/// let mat = Matrix::from_width_height(U8, 8, 8).unwrap();
600+
/// let buffer = Image::new(PlaneMatrices::<_, 2>::from_repeated(mat));
601+
///
602+
/// // … some code to initialize those planes.
603+
/// # let mut buffer = buffer;
604+
/// # buffer.as_mut().into_planes([1]).unwrap()[0]
605+
/// # .as_capacity_buf_mut()[..8].copy_from_slice(b"not zero");
606+
/// # let buffer = buffer;
607+
///
608+
/// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
609+
/// let clone_of: Image<_> = p1.into_owned();
610+
///
611+
/// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
612+
/// assert_eq!(clone_of.as_bytes(), p1.as_bytes());
613+
/// ```
614+
pub fn into_owned(self) -> Image<L> {
615+
self.inner.into_owned().into()
590616
}
591617

592618
/// Get a slice of the individual samples in the layout.
@@ -663,6 +689,18 @@ impl<'data, L> ImageRef<'data, L> {
663689
RawImage::from_buffer(Bytes(next.len()), next).into()
664690
}
665691

692+
/// Remove all past-the-layout bytes.
693+
///
694+
/// This is a utility to combine with pipelining. It is equivalent to calling
695+
/// [`Self::split_layout`] and discarding that result.
696+
pub fn truncate_layout(mut self) -> Self
697+
where
698+
L: Layout,
699+
{
700+
let _ = self.split_layout();
701+
self
702+
}
703+
666704
/// Split this reference into independent planes.
667705
///
668706
/// If any plane fails their indexing operation or would not be aligned to the required
@@ -885,14 +923,6 @@ impl<'data, L> ImageMut<'data, L> {
885923
Some(self.inner.checked_decay()?.into())
886924
}
887925

888-
/// Copy the bytes and layout to an owned container.
889-
pub fn to_owned(&self) -> Image<L>
890-
where
891-
L: Layout + Clone,
892-
{
893-
Image::with_bytes(self.inner.layout().clone(), self.inner.as_bytes())
894-
}
895-
896926
/// Get a slice of the individual samples in the layout.
897927
pub fn as_slice(&self) -> &[L::Sample]
898928
where
@@ -937,6 +967,38 @@ impl<'data, L> ImageMut<'data, L> {
937967
pixel.cast_mut_buf(self.inner.as_mut_buf())
938968
}
939969

970+
/// Copy all bytes to a newly allocated image.
971+
///
972+
/// Note this will allocate a buffer according to the capacity length of this reference, not
973+
/// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
974+
/// or [`Self::truncate_layout`] respectively.
975+
///
976+
/// # Examples
977+
///
978+
/// Here we make an independent copy of the second plane of a composite image.
979+
///
980+
/// ```
981+
/// use image_texel::image::{Image, ImageRef};
982+
/// use image_texel::layout::{PlaneMatrices, Matrix};
983+
/// use image_texel::texels::U8;
984+
///
985+
/// let mat = Matrix::from_width_height(U8, 8, 8).unwrap();
986+
/// let mut buffer = Image::new(PlaneMatrices::<_, 2>::from_repeated(mat));
987+
///
988+
/// // … some code to initialize those planes.
989+
/// # buffer.as_mut().into_planes([1]).unwrap()[0]
990+
/// # .as_capacity_buf_mut()[..8].copy_from_slice(b"not zero");
991+
///
992+
/// let [p1] = buffer.as_mut().into_planes([1]).unwrap();
993+
/// let clone_of: Image<_> = p1.into_owned();
994+
///
995+
/// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
996+
/// assert_eq!(clone_of.as_bytes(), p1.as_bytes());
997+
/// ```
998+
pub fn into_owned(self) -> Image<L> {
999+
self.inner.into_owned().into()
1000+
}
1001+
9401002
/// Turn into a slice of the individual samples in the layout.
9411003
///
9421004
/// This preserves the lifetime with which the layout is borrowed from the underlying image,
@@ -1012,6 +1074,18 @@ impl<'data, L> ImageMut<'data, L> {
10121074
RawImage::from_buffer(Bytes(next.len()), next).into()
10131075
}
10141076

1077+
/// Remove all past-the-layout bytes.
1078+
///
1079+
/// This is a utility to combine with pipelining. It is equivalent to calling
1080+
/// [`Self::split_layout`] and discarding that result.
1081+
pub fn truncate_layout(mut self) -> Self
1082+
where
1083+
L: Layout,
1084+
{
1085+
let _ = self.split_layout();
1086+
self
1087+
}
1088+
10151089
/// Split this mutable reference into independent planes.
10161090
///
10171091
/// If any plane fails their indexing operation or would not be aligned to the required

texel/src/image/atomic.rs

Lines changed: 76 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
//!
33
//! Re-exported at its super `image` module.
44
use crate::buf::{atomic_buf, AtomicBuffer, AtomicSliceRef};
5-
use crate::image::{raw::RawImage, IntoPlanesError};
5+
use crate::image::{raw::RawImage, Image, IntoPlanesError};
66
use crate::layout::{Bytes, Decay, Layout, Mend, PlaneOf, Relocate, SliceLayout, Take, TryMend};
77
use crate::texel::{constants::U8, MAX_ALIGN};
88
use crate::{BufferReuseError, Texel, TexelBuffer};
@@ -146,6 +146,36 @@ impl<L: Layout> AtomicImage<L> {
146146
Some(self.inner.checked_decay()?.into())
147147
}
148148

149+
/// Copy all bytes to a newly allocated image.
150+
///
151+
/// Note this will allocate a buffer according to the capacity length of this reference, not
152+
/// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
153+
/// or [`Self::truncate_layout`] respectively.
154+
///
155+
/// # Examples
156+
///
157+
/// Here we make an independent copy of a pixel matrix image.
158+
///
159+
/// ```
160+
/// use image_texel::image::{AtomicImage, Image};
161+
/// use image_texel::layout::{PlaneMatrices, Matrix};
162+
/// use image_texel::texels::U8;
163+
///
164+
/// let matrix = Matrix::from_width_height(U8, 8, 8).unwrap();
165+
/// let buffer = AtomicImage::new(matrix);
166+
///
167+
/// // … some code to initialize those planes.
168+
/// # let data = buffer.as_texels(U8).index(0..8);
169+
/// # U8.store_atomic_slice(data, b"not zero");
170+
///
171+
/// let clone_of: Image<_> = buffer.clone().into_owned();
172+
///
173+
/// assert!(clone_of.as_bytes() == buffer.as_texels(U8).to_vec());
174+
/// ```
175+
pub fn into_owned(self) -> Image<L> {
176+
self.inner.into_owned().into()
177+
}
178+
149179
/// Move the bytes into a new image.
150180
///
151181
/// Afterwards, `self` will refer to an empty but unique new buffer.
@@ -438,6 +468,38 @@ impl<'data, L> AtomicImageRef<'data, L> {
438468
Some(self.inner.checked_decay()?.into())
439469
}
440470

471+
/// Copy all bytes to a newly allocated image.
472+
///
473+
/// Note this will allocate a buffer according to the capacity length of this reference, not
474+
/// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
475+
/// or [`Self::truncate_layout`] respectively.
476+
///
477+
/// # Examples
478+
///
479+
/// Here we make an independent copy of a pixel matrix image.
480+
///
481+
/// ```
482+
/// use image_texel::image::{AtomicImage, Image};
483+
/// use image_texel::layout::{PlaneMatrices, Matrix};
484+
/// use image_texel::texels::U8;
485+
///
486+
/// let matrix = Matrix::from_width_height(U8, 8, 8).unwrap();
487+
/// let buffer = AtomicImage::new(PlaneMatrices::<_, 2>::from_repeated(matrix));
488+
///
489+
/// // … some code to initialize those planes.
490+
/// # let [plane] = buffer.as_ref().into_planes([1]).unwrap();
491+
/// # let data = buffer.as_texels(U8).index(0..8);
492+
/// # U8.store_atomic_slice(data, b"not zero");
493+
///
494+
/// let [plane1] = buffer.as_ref().into_planes([1]).unwrap();
495+
/// let clone_of: Image<_> = plane1.clone().into_owned();
496+
///
497+
/// assert!(clone_of.as_bytes() == plane1.as_texels(U8).to_vec());
498+
/// ```
499+
pub fn into_owned(self) -> Image<L> {
500+
self.inner.into_owned().into()
501+
}
502+
441503
/// Get a slice of the individual samples in the layout.
442504
pub fn as_slice(&self) -> AtomicSliceRef<'_, L::Sample>
443505
where
@@ -473,11 +535,7 @@ impl<'data, L> AtomicImageRef<'data, L> {
473535
let (buffer, layout) = self.inner.into_parts();
474536
let len = layout.byte_len();
475537

476-
// FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
477-
// of Texel<P> from that atomic.
478-
let mut target = alloc::vec![0; len];
479-
U8.load_atomic_slice(buffer.as_texels(U8).truncate_bytes(len), &mut target);
480-
target
538+
buffer.as_texels(U8).truncate_bytes(len).to_vec()
481539
}
482540

483541
/// Turn into a slice of the individual samples in the layout.
@@ -548,6 +606,18 @@ impl<'data, L> AtomicImageRef<'data, L> {
548606
RawImage::from_buffer(Bytes(next.len()), next).into()
549607
}
550608

609+
/// Remove all past-the-layout bytes.
610+
///
611+
/// This is a utility to combine with pipelining. It is equivalent to calling
612+
/// [`Self::split_layout`] and discarding that result.
613+
pub fn truncate_layout(mut self) -> Self
614+
where
615+
L: Layout,
616+
{
617+
let _ = self.split_layout();
618+
self
619+
}
620+
551621
/// Split this reference into independent planes.
552622
///
553623
/// If any plane fails their indexing operation or would not be aligned to the required

0 commit comments

Comments (0)