2 files changed: +23 −2 lines changed

@@ -58,7 +58,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
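For readers skimming the hunk: the macro above splits a sub-word load into power-of-two chunks, largest first, via `load_sz & chunk_sz` and `i |= chunk_sz`, so every chunk lands on an offset aligned to its own size. A standalone sketch of just that decomposition (illustrative only; `chunk_offsets` is a hypothetical helper, assuming the usual `u32 u16 u8` chunk sequence on a 64-bit target):

```rust
// Hypothetical helper (not in the crate) mirroring the chunking logic above:
// returns the (offset, size) pairs a partial load of `load_sz` bytes uses,
// largest chunks first.
fn chunk_offsets(load_sz: usize) -> Vec<(usize, usize)> {
    assert!(load_sz < 8); // sub-word loads only, as in the real code
    let mut chunks = Vec::new();
    let mut i = 0;
    for chunk_sz in [4usize, 2, 1] {
        // Mirrors `if (load_sz & chunk_sz) != 0` in the macro.
        if load_sz & chunk_sz != 0 {
            chunks.push((i, chunk_sz));
            // `i` accumulates only strictly larger powers of two, so it
            // remains aligned to every smaller chunk size.
            i |= chunk_sz;
        }
    }
    chunks
}

fn main() {
    // A 5-byte load becomes a u32 at offset 0 plus a u8 at offset 4.
    assert_eq!(chunk_offsets(5), [(0, 4), (4, 1)]);
    // A 7-byte load: u32 at 0, u16 at 4, u8 at 6.
    assert_eq!(chunk_offsets(7), [(0, 4), (4, 2), (6, 1)]);
}
```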
@@ -91,7 +91,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
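The substantive change in both hunks is `byte_add` → `wrapping_byte_add`. Per the std docs, `byte_add` shares `add`'s safety contract: the starting and resulting pointers must lie within (or one past the end of) the same allocation, so merely computing an offset from an out-of-bounds base is UB, which Miri reports. `wrapping_byte_add` has no such requirement; UB is deferred to the actual memory access. That matters here because these helpers presumably receive a word-aligned `src` obtained by rounding an address down, which can point before the start of the buffer even though every guarded read stays in bounds. A minimal standalone sketch of the distinction (not the crate's code):

```rust
fn main() {
    let buf = [0u8; 4];
    // Rounding a pointer down to an alignment boundary can produce an
    // address just before the allocation. Creating such a pointer is fine
    // with wrapping arithmetic:
    let below = buf.as_ptr().wrapping_byte_sub(1);
    // UB even though nothing is dereferenced: `byte_add` requires its
    // starting pointer to already be in bounds (Miri flags it).
    // let _p = unsafe { below.byte_add(1) };
    // OK: wrapping arithmetic defers all checks to the eventual access.
    let back_in_bounds = below.wrapping_byte_add(1);
    assert_eq!(back_in_bounds, buf.as_ptr());
}
```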
@@ -230,6 +230,27 @@ fn memmove_backward_aligned() {
     }
 }
 
+#[test]
+fn memmove_misaligned_bounds() {
+    // The above tests have the downside that the addresses surrounding the range-to-copy are all
+    // still in-bounds, so Miri would not actually complain about OOB accesses. So we also test with
+    // an array that has just the right size. We test a few times to avoid it being accidentally
+    // aligned.
+    for _ in 0..8 {
+        let mut arr = [0u8; 17 + 1];
+        unsafe {
+            // Copy forward...
+            let src = arr.as_ptr().offset(0);
+            let dst = arr.as_mut_ptr().offset(1);
+            assert_eq!(memmove(dst, src, 17), dst);
+            // ... and backward.
+            let src = arr.as_ptr().offset(1);
+            let dst = arr.as_mut_ptr().offset(0);
+            assert_eq!(memmove(dst, src, 17), dst);
+        }
+    }
+}
+
 #[test]
 fn memset_backward_misaligned_nonaligned_start() {
     let mut arr = gen_arr::<32>();
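Since the point of the new test is the pointer arithmetic rather than the bytes copied, it only adds coverage when run under Miri. Assuming a standard Miri setup (the exact invocation for this repository's test crate may differ), something like `cargo +nightly miri test memmove_misaligned_bounds` should exercise it, and should fail without the `wrapping_byte_add` change above.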