
Commit bd7576b

Authored by Aurelia Molzer

Merge pull request #64 from image-rs/byte-buffer-wrappers: Polish byte buffer wrappers for release

2 parents ce0cf68 + cb74917, commit bd7576b

File tree

9 files changed: +1021 -43 lines changed


canvas/Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ repository = "https://github.com/image-rs/canvas"
 categories = ["multimedia::images"]

 [dependencies]
-image-texel = { path = "../texel", version = "0.4.0" }
+image-texel = { path = "../texel", version = "0.5.0" }
 bytemuck = "1.1"

 [dev-dependencies]

texel/Cargo.toml

Lines changed: 2 additions & 2 deletions
@@ -1,11 +1,11 @@
 [package]
 name = "image-texel"
-version = "0.4.0"
+version = "0.5.0"
 edition = "2021"
 rust-version = "1.84"

 description = "A texel type and allocated buffers suitable for image data."
-authors = ["Aurelia Molzer <[email protected]>"]
+authors = ["Aurelia Molzer <[email protected]>"]
 license = "MIT"
 readme = "Readme.md"
 documentation = "https://docs.rs/image-texel"

texel/src/buf.rs

Lines changed: 109 additions & 5 deletions
@@ -8,6 +8,7 @@ use alloc::rc::Rc;
 use alloc::sync::Arc;
 use alloc::vec::Vec;

+use crate::rec::TexelBuffer;
 use crate::texel::{constants::MAX, AtomicPart, MaxAligned, MaxAtomic, MaxCell, Texel, MAX_ALIGN};

 /// Allocates and manages raw bytes.
@@ -1017,6 +1018,12 @@ impl cmp::PartialEq<[u8]> for cell_buf {
     }
 }

+impl cmp::PartialEq<cell_buf> for [u8] {
+    fn eq(&self, other: &cell_buf) -> bool {
+        crate::texels::U8.cell_bytes_eq(other.0.as_slice_of_cells(), self)
+    }
+}
+
 impl cmp::Eq for cell_buf {}

 impl cmp::PartialEq for CellBuffer {
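The new impl makes the byte comparison symmetric. A minimal sketch of what this enables, assuming only the two `PartialEq` impls for `cell_buf` and `[u8]`; how the `cell_buf` is constructed is left open:

// Sketch: byte-slice comparison now reads naturally in either direction.
fn matches_header(cells: &cell_buf, header: &[u8]) -> bool {
    // `cells == header` already compiled via `PartialEq<[u8]> for cell_buf`;
    // the impl added above also allows the flipped `header == cells`.
    cells == header && header == cells
}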
@@ -1370,6 +1377,24 @@ impl<'lt, P> AtomicSliceRef<'lt, P> {
         self.texel.load_atomic_slice(*self, data);
     }

+    /// Read all values into a newly allocated vector.
+    pub fn to_vec(&self) -> Vec<P> {
+        // FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
+        // of Texel<P> from that atomic.
+        let mut fresh: Vec<P> = (0..self.len()).map(|_| self.texel.zeroed()).collect();
+        self.write_to_slice(&mut fresh);
+        fresh
+    }
+
+    /// Read all values into a newly allocated texel buffer.
+    pub fn to_texel_buffer(&self) -> TexelBuffer<P> {
+        // FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
+        // of Texel<P> from that atomic.
+        let mut fresh = TexelBuffer::new_for_texel(self.texel, self.len());
+        self.write_to_slice(&mut fresh);
+        fresh
+    }
+
     #[track_caller]
     pub fn split_at(self, at: usize) -> (Self, Self) {
         let left = self.index(..at);
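A short sketch of how the new copy-out helpers can be used, modeled on the `atomic_memory_move` test added further down; the `atomic_buf::new`/`as_texels`/`U8` plumbing is assumed to behave as in that test:

// Sketch only: construct a small atomic buffer as the test module does.
let data = [const { MaxAtomic::zero() }; 1];
let buf = atomic_buf::new(&data[..]);

U8.store_atomic_slice(buf.as_texels(U8).index(0..4), b"rgba");

// Copy the bytes out without having to provide a destination slice.
let as_vec: Vec<u8> = buf.as_texels(U8).index(0..4).to_vec();
let as_rec: TexelBuffer<u8> = buf.as_texels(U8).index(0..4).to_texel_buffer();

assert_eq!(as_vec, b"rgba");
// Assumes `TexelBuffer` exposes its contents as a slice.
assert_eq!(as_rec.as_slice(), b"rgba");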
@@ -1418,6 +1443,34 @@ impl<P> Clone for AtomicSliceRef<'_, P> {

 impl<P> Copy for AtomicSliceRef<'_, P> {}

+impl<P> AtomicRef<'_, P> {
+    /// Modify the value stored in the reference.
+    ///
+    /// Note that this does *not* promise to be atomic in the whole value, just that it atomically
+    /// modifies the underlying buffer elements. The bytes of the value may be torn if another
+    /// write happens concurrently to the same element.
+    ///
+    /// However, it is guaranteed that the contents of any other non-aliased value in the buffer
+    /// are not modified even if they share the same atomic unit.
+    pub fn store(self, value: P) {
+        self.texel.store_atomic(self, value);
+    }
+
+    /// Retrieve a value stored in the reference.
+    ///
+    /// Note that this does *not* promise to be atomic in the whole value, just that it atomically
+    /// reads from the underlying buffer. The bytes of the value may be torn if another write
+    /// happens concurrently to the same element.
+    ///
+    /// If no such write occurs concurrently, i.e. when all writes are ordered-before or
+    /// ordered-after this load, then the value is correct. This need only hold for writes
+    /// accessing the bytes making up _this value_. Even if another value shares atomic units
+    /// with this value, its writes are guaranteed to never modify the bits of this value.
+    pub fn load(self) -> P {
+        self.texel.load_atomic(self)
+    }
+}
+
 impl<P> Clone for AtomicRef<'_, P> {
     fn clone(&self) -> Self {
         AtomicRef { ..*self }
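A minimal sketch of the per-element semantics documented above. Only the new `load`/`store` pair is assumed; how an `AtomicRef` to a single element is obtained is left open:

// Sketch: not a read-modify-write. A concurrent writer to the same element
// may interleave between the two calls, but values that do not alias this
// one stay untouched even when they share an atomic unit.
fn saturating_bump(byte: AtomicRef<'_, u8>) {
    let old = byte.load();
    byte.store(old.saturating_add(1));
}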
@@ -1430,13 +1483,21 @@ impl<P> Copy for AtomicRef<'_, P> {}
 ///
 /// Note this type also has the invariant that the identified range fits into memory for the given
 /// texel type.
-#[derive(Clone, Copy, Debug)]
+#[derive(Debug)]
 pub struct TexelRange<T> {
     texel: Texel<T>,
     start_per_align: usize,
     end_per_align: usize,
 }

+impl<T> Clone for TexelRange<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T> Copy for TexelRange<T> {}
+
 impl<T> TexelRange<T> {
     /// Create a new range from a texel type and a range (in units of `T`).
     pub fn new(texel: Texel<T>, range: ops::Range<usize>) -> Option<Self> {
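The hand-written impls replace the derive so that `TexelRange<T>` stays copyable for every `T`: a derived `Clone`/`Copy` would add `T: Clone`/`T: Copy` bounds even though the struct only stores a `Texel<T>` and two indices. A sketch of the difference:

// Sketch: compiles with the manual impls above for any `T`, without bounds.
// With the previous derive this would have required `T: Copy`.
fn duplicate<T>(range: TexelRange<T>) -> (TexelRange<T>, TexelRange<T>) {
    (range, range)
}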
@@ -1504,14 +1565,19 @@ impl<T> TexelRange<T> {
             end_per_align: end_byte / texel.align(),
         })
     }
+
+    /// Intrinsically, all ranges represent an aligned range of bytes.
+    fn aligned_byte_range(self) -> ops::Range<usize> {
+        let scale = self.texel.align();
+        scale * self.start_per_align..scale * self.end_per_align
+    }
 }

 impl<T> core::ops::Index<TexelRange<T>> for buf {
     type Output = [T];

     fn index(&self, index: TexelRange<T>) -> &Self::Output {
-        let scale = index.texel.align();
-        let bytes = &self.0[scale * index.start_per_align..scale * index.end_per_align];
+        let bytes = &self.0[index.aligned_byte_range()];
         let slice = index.texel.try_to_slice(bytes);
         // We just multiplied the indices by the alignment..
         slice.expect("byte indices validly aligned")
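For orientation, a hedged example of the indexing path that `aligned_byte_range` now backs. `TexelRange::new` and the `Index` impl come from this file; the `U32` texel constant is an assumption, taken to exist analogously to `U8`:

// Sketch: view the first 16 bytes of an aligned buffer as four u32 texels.
// Panics (like any slice indexing) if the buffer is shorter than the range.
fn first_four_words(bytes: &buf) -> &[u32] {
    let range = TexelRange::new(crate::texels::U32, 0..4)
        .expect("a range of four u32 values fits in addressable memory");
    &bytes[range]
}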
@@ -1520,14 +1586,26 @@ impl<T> core::ops::Index<TexelRange<T>> for buf {

 impl<T> core::ops::IndexMut<TexelRange<T>> for buf {
     fn index_mut(&mut self, index: TexelRange<T>) -> &mut Self::Output {
-        let scale = index.texel.align();
-        let bytes = &mut self.0[scale * index.start_per_align..scale * index.end_per_align];
+        let bytes = &mut self.0[index.aligned_byte_range()];
         let slice = index.texel.try_to_slice_mut(bytes);
         // We just multiplied the indices by the alignment..
         slice.expect("byte indices validly aligned")
     }
 }

+impl<T> core::ops::Index<TexelRange<T>> for cell_buf {
+    type Output = [cell::Cell<T>];
+
+    fn index(&self, index: TexelRange<T>) -> &Self::Output {
+        let bytes = &self.0.as_slice_of_cells()[index.aligned_byte_range()];
+        let slice = index.texel.try_to_cell(bytes);
+        // We just multiplied the indices by the alignment..
+        slice
+            .expect("byte indices validly aligned")
+            .as_slice_of_cells()
+    }
+}
+
 impl Default for &'_ cell_buf {
     fn default() -> Self {
         cell_buf::new(&mut [])
@@ -2084,4 +2162,30 @@ mod tests {
             );
         }
     }
+
+    #[test]
+    fn atomic_memory_move() {
+        const COPY_LEN: usize = 3 * core::mem::size_of::<MaxAtomic>();
+        const TOTAL_LEN: usize = 4 * core::mem::size_of::<MaxAtomic>();
+
+        for offset in 0..4 {
+            let data = [const { MaxAtomic::zero() }; 4];
+            let lhs = atomic_buf::new(&data[..]);
+
+            let data = [const { MaxAtomic::zero() }; 4];
+            let rhs = atomic_buf::new(&data[..]);
+
+            U8.store_atomic_slice(lhs.as_texels(U8).index(0..4), b"helo");
+
+            U8.atomic_memory_move(
+                lhs.as_texels(U8).index(offset..offset + COPY_LEN),
+                rhs.as_texels(U8).index(0..COPY_LEN),
+            );
+
+            let mut buffer = [0x42; TOTAL_LEN];
+            U8.load_atomic_slice(rhs.as_texels(U8), &mut buffer);
+
+            assert_eq!(buffer[..4], b"helo\0\0\0\0"[offset..][..4]);
+        }
+    }
 }

texel/src/image.rs

Lines changed: 90 additions & 15 deletions
@@ -10,9 +10,10 @@
 //! advised, probably very common, and the only 'supported' use-case).
 mod atomic;
 mod cell;
-mod data;
 mod raw;

+pub mod data;
+
 use core::{fmt, ops};

 pub(crate) use self::raw::RawImage;
@@ -26,7 +27,7 @@ use crate::{BufferReuseError, Texel, TexelBuffer};
 pub use crate::stride::{StridedBufferMut, StridedBufferRef};
 pub use atomic::{AtomicImage, AtomicImageRef};
 pub use cell::{CellImage, CellImageRef};
-pub use data::{DataCells, DataMut, DataRef};
+pub use data::{AsCopySource, AsCopyTarget, DataCells, DataMut, DataRef};

 /// A container of allocated bytes, parameterized over the layout.
 ///
@@ -582,11 +583,37 @@ impl<'data, L> ImageRef<'data, L> {
     }

     /// Copy all bytes to a newly allocated image.
-    pub fn to_owned(&self) -> Image<L>
-    where
-        L: Layout + Clone,
-    {
-        Image::with_bytes(self.inner.layout().clone(), self.inner.as_bytes())
+    ///
+    /// Note this will allocate a buffer according to the capacity length of this reference, not
+    /// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
+    /// or [`Self::truncate_layout`] first.
+    ///
+    /// # Examples
+    ///
+    /// Here we make an independent copy of the second plane of a composite image.
+    ///
+    /// ```
+    /// use image_texel::image::{Image, ImageRef};
+    /// use image_texel::layout::{PlaneMatrices, Matrix};
+    /// use image_texel::texels::U8;
+    ///
+    /// let mat = Matrix::from_width_height(U8, 8, 8).unwrap();
+    /// let buffer = Image::new(PlaneMatrices::<_, 2>::from_repeated(mat));
+    ///
+    /// // … some code to initialize those planes.
+    /// # let mut buffer = buffer;
+    /// # buffer.as_mut().into_planes([1]).unwrap()[0]
+    /// #     .as_capacity_buf_mut()[..8].copy_from_slice(b"not zero");
+    /// # let buffer = buffer;
+    ///
+    /// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
+    /// let clone_of: Image<_> = p1.into_owned();
+    ///
+    /// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
+    /// assert_eq!(clone_of.as_bytes(), p1.as_bytes());
+    /// ```
+    pub fn into_owned(self) -> Image<L> {
+        self.inner.into_owned().into()
     }

     /// Get a slice of the individual samples in the layout.
@@ -663,6 +690,18 @@ impl<'data, L> ImageRef<'data, L> {
         RawImage::from_buffer(Bytes(next.len()), next).into()
     }

+    /// Remove all past-the-layout bytes.
+    ///
+    /// This is a utility to combine with pipelining. It is equivalent to calling
+    /// [`Self::split_layout`] and discarding that result.
+    pub fn truncate_layout(mut self) -> Self
+    where
+        L: Layout,
+    {
+        let _ = self.split_layout();
+        self
+    }
+
     /// Split this reference into independent planes.
     ///
     /// If any plane fails their indexing operation or would not be aligned to the required
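Combined with `into_owned` above, `truncate_layout` lets a pipeline drop the past-the-layout capacity before copying. A sketch, assuming the `Layout` trait is importable from `image_texel::layout`:

use image_texel::image::{Image, ImageRef};
use image_texel::layout::Layout;

// Sketch: copy only the bytes covered by the layout, not the whole capacity
// of the underlying buffer, by truncating before the owned copy is made.
fn copy_layout_bytes<L: Layout>(plane: ImageRef<'_, L>) -> Image<L> {
    plane.truncate_layout().into_owned()
}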
@@ -885,14 +924,6 @@ impl<'data, L> ImageMut<'data, L> {
         Some(self.inner.checked_decay()?.into())
     }

-    /// Copy the bytes and layout to an owned container.
-    pub fn to_owned(&self) -> Image<L>
-    where
-        L: Layout + Clone,
-    {
-        Image::with_bytes(self.inner.layout().clone(), self.inner.as_bytes())
-    }
-
     /// Get a slice of the individual samples in the layout.
     pub fn as_slice(&self) -> &[L::Sample]
     where
@@ -937,6 +968,38 @@ impl<'data, L> ImageMut<'data, L> {
         pixel.cast_mut_buf(self.inner.as_mut_buf())
     }

+    /// Copy all bytes to a newly allocated image.
+    ///
+    /// Note this will allocate a buffer according to the capacity length of this reference, not
+    /// merely the layout. When this is not the intention, consider calling [`Self::split_layout`]
+    /// or [`Self::truncate_layout`] first.
+    ///
+    /// # Examples
+    ///
+    /// Here we make an independent copy of the second plane of a composite image.
+    ///
+    /// ```
+    /// use image_texel::image::{Image, ImageRef};
+    /// use image_texel::layout::{PlaneMatrices, Matrix};
+    /// use image_texel::texels::U8;
+    ///
+    /// let mat = Matrix::from_width_height(U8, 8, 8).unwrap();
+    /// let mut buffer = Image::new(PlaneMatrices::<_, 2>::from_repeated(mat));
+    ///
+    /// // … some code to initialize those planes.
+    /// # buffer.as_mut().into_planes([1]).unwrap()[0]
+    /// #     .as_capacity_buf_mut()[..8].copy_from_slice(b"not zero");
+    ///
+    /// let [p1] = buffer.as_mut().into_planes([1]).unwrap();
+    /// let clone_of: Image<_> = p1.into_owned();
+    ///
+    /// let [p1] = buffer.as_ref().into_planes([1]).unwrap();
+    /// assert_eq!(clone_of.as_bytes(), p1.as_bytes());
+    /// ```
+    pub fn into_owned(self) -> Image<L> {
+        self.inner.into_owned().into()
+    }
+
     /// Turn into a slice of the individual samples in the layout.
     ///
     /// This preserves the lifetime with which the layout is borrowed from the underlying image,
@@ -1012,6 +1075,18 @@ impl<'data, L> ImageMut<'data, L> {
         RawImage::from_buffer(Bytes(next.len()), next).into()
     }

+    /// Remove all past-the-layout bytes.
+    ///
+    /// This is a utility to combine with pipelining. It is equivalent to calling
+    /// [`Self::split_layout`] and discarding that result.
+    pub fn truncate_layout(mut self) -> Self
+    where
+        L: Layout,
+    {
+        let _ = self.split_layout();
+        self
+    }
+
     /// Split this mutable reference into independent planes.
     ///
     /// If any plane fails their indexing operation or would not be aligned to the required
