@@ -56,7 +56,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
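For context on the change above: `byte_add` inherits the safety contract of `ptr::add`, so even computing an address that leaves the pointer's allocation is undefined behavior, while the wrapping variants defer any UB to an actual out-of-bounds access. Presumably that is why these partial loads, whose realigned base pointer can sit just outside the buffer being copied, switch to the wrapping form. A minimal standalone illustration (not part of this patch; the names are arbitrary):

    fn main() {
        let buf = [0u8; 8];
        let p: *const u8 = buf.as_ptr();

        // Allowed: the wrapping offset may leave the 8-byte allocation, because
        // the resulting pointer is only compared here, never dereferenced.
        let past = p.wrapping_byte_add(16);
        assert!(past != p);

        // The non-wrapping form would already be undefined behavior for the
        // same offset, since 16 bytes past the start leaves the allocation:
        // let _bad = unsafe { p.byte_add(16) };
    }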
@@ -69,9 +69,9 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
     out
 }
 
-/// Load `load_sz` many bytes from `src.byte_add(WORD_SIZE - load_sz)`. `src` must be `usize`-aligned.
-/// The bytes are returned as the *last* bytes of the return value, i.e., this acts as if we had done
-/// a `usize` read from `src`, with the out-of-bounds part filled with 0s.
+/// Load `load_sz` many bytes from `src.wrapping_byte_add(WORD_SIZE - load_sz)`. `src` must be
+/// `usize`-aligned. The bytes are returned as the *last* bytes of the return value, i.e., this acts
+/// as if we had done a `usize` read from `src`, with the out-of-bounds part filled with 0s.
 /// `load_sz` be strictly less than `WORD_SIZE`.
 #[cfg(not(feature = "mem-unaligned"))]
 #[inline(always)]
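To make the documented behavior above concrete, here is a rough byte-wise reference model of `load_aligned_end_partial` (a sketch for illustration only, not the optimized chunked implementation in this file; the helper name is invented):

    use core::mem::size_of;

    const WORD_SIZE: usize = size_of::<usize>();

    /// Behaves like a full `usize` read from `src`, except that only the last
    /// `load_sz` bytes of the word are actually read from memory; the leading
    /// `WORD_SIZE - load_sz` bytes of the result are zero.
    ///
    /// Safety: the last `load_sz` bytes of the word at `src` must be readable.
    unsafe fn load_aligned_end_partial_ref(src: *const usize, load_sz: usize) -> usize {
        debug_assert!(load_sz < WORD_SIZE);
        let start = WORD_SIZE - load_sz;
        let mut bytes = [0u8; WORD_SIZE];
        unsafe {
            core::ptr::copy_nonoverlapping(
                (src as *const u8).wrapping_add(start),
                bytes.as_mut_ptr().add(start),
                load_sz,
            );
        }
        usize::from_ne_bytes(bytes)
    }

On a little-endian target the loaded bytes therefore end up in the most significant part of the returned word, which is what the shift in `copy_forward` below relies on.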
@@ -87,7 +87,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
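For readers unfamiliar with the `load_sz & chunk_sz` / `i |= chunk_sz` pattern in these two hunks: the partial load is decomposed into at most one naturally aligned chunk per set bit of `load_sz`. A standalone sketch of the offsets this produces in the small-reads-first order used by `load_aligned_end_partial` (the function name and the `Vec` return are illustrative only):

    use core::mem::size_of;

    /// Returns (offset within the word, chunk size) pairs, smallest chunks
    /// first, for a partial load of `load_sz` bytes ending at the end of a word.
    fn end_partial_chunks(load_sz: usize) -> Vec<(usize, usize)> {
        const WORD_SIZE: usize = size_of::<usize>();
        assert!(load_sz < WORD_SIZE);
        let start_shift = WORD_SIZE - load_sz;
        let mut reads = Vec::new();
        let mut i = 0;
        let mut chunk_sz = 1;
        while chunk_sz < WORD_SIZE {
            if load_sz & chunk_sz != 0 {
                // Because all smaller chunks were consumed first, `start_shift + i`
                // is a multiple of `chunk_sz` here, so the read stays aligned.
                reads.push((start_shift + i, chunk_sz));
                i |= chunk_sz;
            }
            chunk_sz *= 2;
        }
        reads
    }

    fn main() {
        // On a 64-bit target (WORD_SIZE = 8), a 5-byte end-partial load
        // (5 = 4 + 1) becomes a 1-byte read at offset 3, then a 4-byte read at offset 4.
        assert_eq!(end_partial_chunks(5), vec![(3, 1), (4, 4)]);
    }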
@@ -142,7 +142,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
 
         while dest_usize.wrapping_add(1) < dest_end {
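The `shift` / `prev_word` pair set up here feeds the usual misaligned-copy trick: each destination word is assembled from two consecutive aligned source words. A standalone little-endian sketch of that combining step (the function name and exact shift expression are illustrative; the real code also handles big-endian and runs inside the copy loop):

    /// Combine two consecutive aligned source words into one destination word
    /// for a source misaligned by `offset` bytes (little-endian, 64-bit).
    fn combine_le(prev_word: u64, next_word: u64, offset: usize) -> u64 {
        assert!(offset > 0 && offset < 8);
        let shift = offset * 8;
        (prev_word >> shift) | (next_word << (64 - shift))
    }

    fn main() {
        // Memory bytes 0..=15 viewed as two little-endian words.
        let prev_word = u64::from_le_bytes([0, 1, 2, 3, 4, 5, 6, 7]);
        let next_word = u64::from_le_bytes([8, 9, 10, 11, 12, 13, 14, 15]);
        // Copying from a source that starts 3 bytes into the first word should
        // yield bytes 3..=10 as the first destination word.
        let out = combine_le(prev_word, next_word, 3);
        assert_eq!(out.to_le_bytes(), [3, 4, 5, 6, 7, 8, 9, 10]);
    }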
@@ -255,7 +255,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_partial(src_aligned, offset);
 
         while dest_start.wrapping_add(1) < dest_usize {
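`load_aligned_partial`, used for the backward direction here, is the mirror image of `load_aligned_end_partial`: it reads only the *first* `load_sz` bytes of the word at `src` and zero-fills the rest, as the first hunk above shows. A rough byte-wise reference model (illustrative sketch only, not the chunked implementation in this file):

    use core::mem::size_of;

    const WORD_SIZE: usize = size_of::<usize>();

    /// Behaves like a full `usize` read from `src`, except that only the first
    /// `load_sz` bytes are actually read; the trailing bytes of the result are 0.
    ///
    /// Safety: the first `load_sz` bytes at `src` must be readable.
    unsafe fn load_aligned_partial_ref(src: *const usize, load_sz: usize) -> usize {
        debug_assert!(load_sz < WORD_SIZE);
        let mut bytes = [0u8; WORD_SIZE];
        unsafe {
            core::ptr::copy_nonoverlapping(src as *const u8, bytes.as_mut_ptr(), load_sz);
        }
        usize::from_ne_bytes(bytes)
    }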