@@ -8,6 +8,7 @@ use alloc::rc::Rc;
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 
+use crate::rec::TexelBuffer;
 use crate::texel::{constants::MAX, AtomicPart, MaxAligned, MaxAtomic, MaxCell, Texel, MAX_ALIGN};
 
 /// Allocates and manages raw bytes.
@@ -1017,6 +1018,12 @@ impl cmp::PartialEq<[u8]> for cell_buf {
     }
 }
 
+impl cmp::PartialEq<cell_buf> for [u8] {
+    fn eq(&self, other: &cell_buf) -> bool {
+        crate::texels::U8.cell_bytes_eq(other.0.as_slice_of_cells(), self)
+    }
+}
+
 impl cmp::Eq for cell_buf {}
 
 impl cmp::PartialEq for CellBuffer {
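With both directions implemented, byte-slice equality becomes symmetric. A minimal usage sketch (assuming `cell_buf::new` returns a `&cell_buf` over the given bytes, as the `Default` impl further down in this file uses it):

```rust
let mut storage = [1u8, 2, 3, 4];
let cells = cell_buf::new(&mut storage[..]);
assert!(*cells == [1u8, 2, 3, 4][..]); // direction that already existed
assert!([1u8, 2, 3, 4][..] == *cells); // direction added by this impl
```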
@@ -1370,6 +1377,24 @@ impl<'lt, P> AtomicSliceRef<'lt, P> {
         self.texel.load_atomic_slice(*self, data);
     }
 
+    /// Read all values into a newly allocated vector.
+    pub fn to_vec(&self) -> Vec<P> {
+        // FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
+        // of Texel<P> from that atomic.
+        let mut fresh: Vec<P> = (0..self.len()).map(|_| self.texel.zeroed()).collect();
+        self.write_to_slice(&mut fresh);
+        fresh
+    }
+
+    /// Read all values into a newly allocated texel buffer.
+    pub fn to_texel_buffer(&self) -> TexelBuffer<P> {
+        // FIXME: avoid zero-initializing. Might need a bit more unsafe code that extends a vector
+        // of Texel<P> from that atomic.
+        let mut fresh = TexelBuffer::new_for_texel(self.texel, self.len());
+        self.write_to_slice(&mut fresh);
+        fresh
+    }
+
     #[track_caller]
     pub fn split_at(self, at: usize) -> (Self, Self) {
         let left = self.index(..at);
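Both helpers take an element-wise snapshot of the atomic slice via `write_to_slice`. A rough sketch of the `to_vec` side, reusing `atomic_buf::new` and the `U8` texel constant from the test module at the bottom of this diff:

```rust
let data = [const { MaxAtomic::zero() }; 1];
let buf = atomic_buf::new(&data[..]);

// Snapshot the whole buffer; concurrent writers may interleave between
// elements, but each element itself is read atomically.
let snapshot: Vec<u8> = buf.as_texels(U8).to_vec();
assert_eq!(snapshot.len(), core::mem::size_of::<MaxAtomic>());
assert!(snapshot.iter().all(|&byte| byte == 0));
```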
@@ -1418,6 +1443,34 @@ impl<P> Clone for AtomicSliceRef<'_, P> {
 
 impl<P> Copy for AtomicSliceRef<'_, P> {}
 
+impl<P> AtomicRef<'_, P> {
+    /// Modify the value stored in the reference.
+    ///
+    /// Note that this does *not* promise to be atomic in the whole value, just that it atomically
+    /// modifies the underlying buffer elements. The bytes of the value may be torn if another
+    /// write happens concurrently to the same element.
+    ///
+    /// However, it is guaranteed that the contents of any other non-aliased value in the buffer
+    /// are not modified, even if they share the same atomic unit.
+    pub fn store(self, value: P) {
+        self.texel.store_atomic(self, value);
+    }
+
+    /// Retrieve a value stored in the reference.
+    ///
+    /// Note that this does *not* promise to be atomic in the whole value, just that it atomically
+    /// reads from the underlying buffer. The bytes of the value may be torn if another write
+    /// happens concurrently to the same element.
+    ///
+    /// If no such write occurs concurrently, i.e. all writes are ordered-before or ordered-after
+    /// this load, then the loaded value is correct. This need only hold for writes accessing the
+    /// bytes making up _this value_. Even if other values share atomic units with this value,
+    /// their writes are guaranteed to never modify the bits of this value.
+    pub fn load(self) -> P {
+        self.texel.load_atomic(self)
+    }
+}
+
 impl<P> Clone for AtomicRef<'_, P> {
     fn clone(&self) -> Self {
         AtomicRef { ..*self }
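A sketch of the guarantee: stores and loads are atomic per underlying buffer unit, so a multi-unit value can tear under concurrent writes, while a single-threaded round-trip is always exact. `element_at` below is a hypothetical accessor standing in for however a single-element `AtomicRef` is obtained; the diff does not show that part:

```rust
// Hypothetical: assume `texels: AtomicSliceRef<'_, u8>` and an accessor
// `element_at(i)` yielding an `AtomicRef<'_, u8>` to one element.
let elem = texels.element_at(0);
elem.store(0xAB); // touches only this value's bytes, even inside a shared atomic unit
assert_eq!(elem.load(), 0xAB); // exact here: no write races with the load
```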
@@ -1430,13 +1483,21 @@ impl<P> Copy for AtomicRef<'_, P> {}
 ///
 /// Note this type also has the invariant that the identified range fits into memory for the given
 /// texel type.
-#[derive(Clone, Copy, Debug)]
+#[derive(Debug)]
 pub struct TexelRange<T> {
     texel: Texel<T>,
     start_per_align: usize,
     end_per_align: usize,
 }
 
+impl<T> Clone for TexelRange<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T> Copy for TexelRange<T> {}
+
 impl<T> TexelRange<T> {
     /// Create a new range from a texel type and a range (in units of `T`).
     pub fn new(texel: Texel<T>, range: ops::Range<usize>) -> Option<Self> {
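Replacing the derive with manual impls drops the implicit `T: Clone`/`T: Copy` bounds: `TexelRange<T>` only stores a `Texel<T>` descriptor and two `usize` fields, so it is copyable regardless of `T`. A sketch of what this enables, with `NotCopy` as a stand-in element type:

```rust
struct NotCopy(Vec<u8>); // deliberately neither Clone nor Copy

fn require_copy<T: Copy>(_: T) {}

fn demo(range: TexelRange<NotCopy>) {
    // Compiles with the manual impls; a derived `Copy` would have
    // required `NotCopy: Copy` and rejected this call.
    require_copy(range);
}
```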
@@ -1504,14 +1565,19 @@ impl<T> TexelRange<T> {
             end_per_align: end_byte / texel.align(),
         })
     }
+
+    /// Intrinsically, all ranges represent an aligned range of bytes.
+    fn aligned_byte_range(self) -> ops::Range<usize> {
+        let scale = self.texel.align();
+        scale * self.start_per_align..scale * self.end_per_align
+    }
 }
 
 impl<T> core::ops::Index<TexelRange<T>> for buf {
     type Output = [T];
 
     fn index(&self, index: TexelRange<T>) -> &Self::Output {
-        let scale = index.texel.align();
-        let bytes = &self.0[scale * index.start_per_align..scale * index.end_per_align];
+        let bytes = &self.0[index.aligned_byte_range()];
         let slice = index.texel.try_to_slice(bytes);
         // We just multiplied the indices by the alignment..
         slice.expect("byte indices validly aligned")
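The helper centralizes the scaling that the `Index` impls previously duplicated. A worked example of the arithmetic, assuming a texel type whose size and alignment are both 4 bytes:

```rust
// Element range 2..5 of a 4-byte texel covers bytes 8..20, stored as
// start_per_align = 8 / 4 = 2 and end_per_align = 20 / 4 = 5.
// aligned_byte_range() recovers the byte range by rescaling:
let (scale, start_per_align, end_per_align) = (4usize, 2, 5);
assert_eq!(scale * start_per_align..scale * end_per_align, 8..20);
```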
@@ -1520,14 +1586,26 @@ impl<T> core::ops::Index<TexelRange<T>> for buf {
 
 impl<T> core::ops::IndexMut<TexelRange<T>> for buf {
     fn index_mut(&mut self, index: TexelRange<T>) -> &mut Self::Output {
-        let scale = index.texel.align();
-        let bytes = &mut self.0[scale * index.start_per_align..scale * index.end_per_align];
+        let bytes = &mut self.0[index.aligned_byte_range()];
         let slice = index.texel.try_to_slice_mut(bytes);
         // We just multiplied the indices by the alignment..
         slice.expect("byte indices validly aligned")
     }
 }
 
+impl<T> core::ops::Index<TexelRange<T>> for cell_buf {
+    type Output = [cell::Cell<T>];
+
+    fn index(&self, index: TexelRange<T>) -> &Self::Output {
+        let bytes = &self.0.as_slice_of_cells()[index.aligned_byte_range()];
+        let slice = index.texel.try_to_cell(bytes);
+        // We just multiplied the indices by the alignment..
+        slice
+            .expect("byte indices validly aligned")
+            .as_slice_of_cells()
+    }
+}
+
 impl Default for &'_ cell_buf {
     fn default() -> Self {
         cell_buf::new(&mut [])
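This mirrors the `buf` impls above for the shared-mutability buffer: indexing by a `TexelRange<T>` yields typed cells instead of plain slices. A usage sketch (assuming `cell_buf::new` accepts a `&mut [u8]`, as in the `Default` impl, and using the `U8` constant from `crate::texels`):

```rust
let mut storage = [0u8; 8];
let cells = cell_buf::new(&mut storage[..]);

let range = TexelRange::new(U8, 2..6).expect("in bounds; trivially aligned for u8");
let slice: &[core::cell::Cell<u8>] = &cells[range];
slice[0].set(0xFF); // shared mutation through the `Cell`s
```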
@@ -2084,4 +2162,30 @@ mod tests {
             );
         }
     }
+
+    #[test]
+    fn atomic_memory_move() {
+        const COPY_LEN: usize = 3 * core::mem::size_of::<MaxAtomic>();
+        const TOTAL_LEN: usize = 4 * core::mem::size_of::<MaxAtomic>();
+
+        for offset in 0..4 {
+            let data = [const { MaxAtomic::zero() }; 4];
+            let lhs = atomic_buf::new(&data[..]);
+
+            let data = [const { MaxAtomic::zero() }; 4];
+            let rhs = atomic_buf::new(&data[..]);
+
+            U8.store_atomic_slice(lhs.as_texels(U8).index(0..4), b"helo");
+
+            U8.atomic_memory_move(
+                lhs.as_texels(U8).index(offset..offset + COPY_LEN),
+                rhs.as_texels(U8).index(0..COPY_LEN),
+            );
+
+            let mut buffer = [0x42; TOTAL_LEN];
+            U8.load_atomic_slice(rhs.as_texels(U8), &mut buffer);
+
+            assert_eq!(buffer[..4], b"helo\0\0\0\0"[offset..][..4]);
+        }
+    }
 }