
Commit ca8c54b

add test to make Miri able to detect OOB in memmove

1 parent 20d0860
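
Some context on why the source change is needed at all: the word-based memmove paths compute pointers to the aligned words around the copy range, and those pointers can lie outside the source allocation even though only in-bounds bytes are ever dereferenced. `byte_add`/`byte_sub` require the *result* of the arithmetic to stay in bounds, so Miri would report UB on the offset computation itself, before any load happens; the `wrapping_` variants are always defined, leaving only genuine out-of-bounds reads for Miri to catch. A minimal standalone sketch of that distinction (my own illustration, not code from this commit):

fn main() {
    let arr = [0u8; 4];
    let p = arr.as_ptr();

    // Defined: wrapping pointer arithmetic may leave the allocation, as long
    // as the resulting pointer is never dereferenced.
    let oob = p.wrapping_byte_add(100);
    println!("computed (but never read): {oob:p}");

    // UB, and reported by Miri at the arithmetic already: `byte_add` requires
    // the result to stay within the same allocation (or one past its end).
    // let oob = unsafe { p.byte_add(100) };
}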

File tree

2 files changed: +24 -7

src/mem/impls.rs (+7 -7)
@@ -56,7 +56,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -69,9 +69,9 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
     out
 }
 
-/// Load `load_sz` many bytes from `src.byte_add(WORD_SIZE - load_sz)`. `src` must be `usize`-aligned.
-/// The bytes are returned as the *last* bytes of the return value, i.e., this acts as if we had done
-/// a `usize` read from `src`, with the out-of-bounds part filled with 0s.
+/// Load `load_sz` many bytes from `src.wrapping_byte_add(WORD_SIZE - load_sz)`. `src` must be
+/// `usize`-aligned. The bytes are returned as the *last* bytes of the return value, i.e., this acts
+/// as if we had done a `usize` read from `src`, with the out-of-bounds part filled with 0s.
 /// `load_sz` be strictly less than `WORD_SIZE`.
 #[cfg(not(feature = "mem-unaligned"))]
 #[inline(always)]
@@ -87,7 +87,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -142,7 +142,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
 
         while dest_usize.wrapping_add(1) < dest_end {
@@ -255,7 +255,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_partial(src_aligned, offset);
 
         while dest_start.wrapping_add(1) < dest_usize {
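
To see why the `copy_forward`/`copy_backward` hunks matter: when `src` is misaligned, `src_aligned` is the word-aligned pointer at or before it, and if `src` sits right at the start of its allocation that pointer lies before the allocation, so computing it with `byte_sub` would itself be UB. A rough standalone sketch of the realignment arithmetic (hypothetical values, not code from this repository):

fn main() {
    const WORD_SIZE: usize = core::mem::size_of::<usize>();
    let buf = [0u8; 17];

    // Pretend `src` is the (possibly misaligned) source pointer of a copy.
    let src = buf.as_ptr().wrapping_byte_add(1);
    let offset = src as usize % WORD_SIZE;

    // The aligned word containing `src` may start before `buf` itself, so the
    // pointer to it must be formed with wrapping arithmetic; the partial-load
    // helpers above then read only the in-bounds bytes of that word.
    let src_aligned = src.wrapping_byte_sub(offset) as *const usize;
    println!("src = {src:p}, offset = {offset}, src_aligned = {src_aligned:p}");
}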

testcrate/tests/mem.rs (+17 -0)
@@ -230,6 +230,23 @@ fn memmove_backward_aligned() {
     }
 }
 
+#[test]
+fn memmove_misaligned_bounds() {
+    // The above tests have the downside that the addresses surrounding the range-to-copy are all
+    // still in-bounds, so Miri would not actually complain about OOB accesses. So we also test with
+    // an array that has just the right size. We test a few times to avoid it being accidentally
+    // aligned.
+    for _ in 0..8 {
+        let mut arr1 = [0u8; 17];
+        let mut arr2 = [0u8; 17];
+        unsafe {
+            // Copy both ways so we hit both the forward and backward cases.
+            memmove(arr1.as_mut_ptr(), arr2.as_mut_ptr(), 17);
+            memmove(arr2.as_mut_ptr(), arr1.as_mut_ptr(), 17);
+        }
+    }
+}
+
 #[test]
 fn memset_backward_misaligned_nonaligned_start() {
     let mut arr = gen_arr::<32>();
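
The new test is only meaningful under Miri (for instance via `cargo miri test` in the testcrate): natively, a stray word-sized read past a 17-byte stack array would usually go unnoticed. A quick standalone check of why 17 bytes is "just the right size" on a target with an 8-byte word (my own illustration, not part of the commit):

fn main() {
    const WORD_SIZE: usize = core::mem::size_of::<usize>();
    let arr = [0u8; 17];
    let start = arr.as_ptr() as usize;

    // The aligned words containing the first and the last byte of `arr`.
    let first_word = start & !(WORD_SIZE - 1);
    let last_word = (start + 17 - 1) & !(WORD_SIZE - 1);

    // Unless `arr` happens to be word-aligned, `first_word` starts before the
    // array and `last_word + WORD_SIZE` ends after it, so a full-word load at
    // either boundary would be out of bounds -- which Miri can now flag, since
    // the wrapping arithmetic no longer trips it up first. Re-creating the
    // arrays for a few iterations makes accidental alignment unlikely.
    println!("first word at {first_word:#x}, last word at {last_word:#x}");
}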
