@@ -41,6 +41,61 @@ unsafe fn read_usize_unaligned(x: *const usize) -> usize {
     core::mem::transmute(x_read)
 }
 
+/// Load `load_sz` many bytes from `src`, which must be usize-aligned. Acts as if we did a `usize`
+/// read with the out-of-bounds part filled with 0s.
+/// `load_sz` must be strictly less than `WORD_SIZE`.
+#[cfg(not(feature = "mem-unaligned"))]
+#[inline(always)]
+unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
+    let mut i = 0;
+    let mut out = 0usize;
+    macro_rules! load_prefix {
+        ($($ty:ty)+) => {$(
+            let chunk_sz = core::mem::size_of::<$ty>();
+            if (load_sz & chunk_sz) != 0 {
+                // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
+                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                i |= chunk_sz;
+            }
+        )+};
+    }
+    // We can read up to 7 bytes here, which is enough for WORD_SIZE of 8
+    // (since `load_sz < WORD_SIZE`).
+    const { assert!(WORD_SIZE <= 8) };
+    load_prefix!(u32 u16 u8);
+    debug_assert!(i == load_sz);
+    out
+}
+
+/// Load `load_sz` many bytes from `src.byte_add(WORD_SIZE - load_sz)`. `src` must be `usize`-aligned.
+/// The bytes are returned as the *last* bytes of the return value, i.e., acts as if we had done
+/// a `usize` read from `src`, with the out-of-bounds part filled with 0s.
+/// `load_sz` must be strictly less than `WORD_SIZE`.
+#[cfg(not(feature = "mem-unaligned"))]
+#[inline(always)]
+unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
+    let mut i = 0;
+    let mut out = 0usize;
+    let start_shift = WORD_SIZE - load_sz;
+    macro_rules! load_prefix {
+        ($($ty:ty)+) => {$(
+            let chunk_sz = core::mem::size_of::<$ty>();
+            if (load_sz & chunk_sz) != 0 {
+                // Since we are doing the small reads first, `start_shift + i` has in the meantime
+                // become aligned to `chunk_sz`.
+                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                i |= chunk_sz;
+            }
+        )+};
+    }
+    // We can read up to 7 bytes here, which is enough for WORD_SIZE of 8
+    // (since `load_sz < WORD_SIZE`).
+    const { assert!(WORD_SIZE <= 8) };
+    load_prefix!(u8 u16 u32);
+    debug_assert!(i == load_sz);
+    out
+}
+
 #[inline(always)]
 pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) {
     #[inline(always)]
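A note on how these two helpers decompose a partial load: the bits of `load_sz` (which is in `1..WORD_SIZE`) select which power-of-two chunks get read, largest-first in `load_aligned_partial` so every read stays aligned, smallest-first in `load_aligned_end_partial` so the offset becomes aligned as the chunks grow. A minimal standalone sketch of that decomposition for `WORD_SIZE == 8` (the `chunks` helper is hypothetical, written only to illustrate what `load_prefix!(u32 u16 u8)` expands to):

```rust
/// Hypothetical illustration of `load_prefix!(u32 u16 u8)`: which chunk reads
/// a partial load of `load_sz` bytes performs, and at which byte offsets.
fn chunks(load_sz: usize) -> Vec<(usize, usize)> {
    assert!(load_sz > 0 && load_sz < 8);
    let mut i = 0;
    let mut out = Vec::new();
    for chunk_sz in [4, 2, 1] {
        // Same test as the macro: read a chunk iff its bit is set in `load_sz`.
        if load_sz & chunk_sz != 0 {
            out.push((i, chunk_sz)); // (byte offset, chunk size)
            i |= chunk_sz; // largest-first, so `i` stays `chunk_sz`-aligned
        }
    }
    out
}

fn main() {
    assert_eq!(chunks(7), [(0, 4), (4, 2), (6, 1)]); // u32, then u16, then u8
    assert_eq!(chunks(5), [(0, 4), (4, 1)]);         // u32, then u8
    assert_eq!(chunks(2), [(0, 2)]);                 // just a u16
}
```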
@@ -66,40 +121,54 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         }
     }
 
+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(not(feature = "mem-unaligned"))]
     #[inline(always)]
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(n > 0 && n % WORD_SIZE == 0);
+
         let mut dest_usize = dest as *mut usize;
         let dest_end = dest.wrapping_add(n) as *mut usize;
 
         // Calculate the misalignment offset and shift needed to reassemble value.
+        // Since `src` is definitely not aligned, `offset` is in the range 1..WORD_SIZE.
         let offset = src as usize & WORD_MASK;
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // This will read (but won't use) bytes out of bound.
-        // cfg needed because not all targets will have atomic loads that can be lowered
-        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
-        #[cfg(target_has_atomic_load_store = "ptr")]
-        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
-        #[cfg(not(target_has_atomic_load_store = "ptr"))]
-        let mut prev_word = core::ptr::read_volatile(src_aligned);
+        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
 
-        while dest_usize < dest_end {
+        while dest_usize.wrapping_add(1) < dest_end {
             src_aligned = src_aligned.wrapping_add(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
-            let resembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
+            let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
             #[cfg(target_endian = "big")]
-            let resembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
+            let reassembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
             prev_word = cur_word;
 
-            *dest_usize = resembled;
+            *dest_usize = reassembled;
             dest_usize = dest_usize.wrapping_add(1);
         }
+
+        // There's one more element left to go, and we can't use the loop for that as on the `src` side,
+        // it is partially out-of-bounds.
+        src_aligned = src_aligned.wrapping_add(1);
+        let cur_word = load_aligned_partial(src_aligned, offset);
+        #[cfg(target_endian = "little")]
+        let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
+        #[cfg(target_endian = "big")]
+        let reassembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
+        // prev_word does not matter any more
+
+        *dest_usize = reassembled;
+        // dest_usize does not matter any more
     }
 
+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(feature = "mem-unaligned")]
     #[inline(always)]
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
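To make the shift arithmetic in `copy_forward_misaligned_words` concrete: on a little-endian target, each output word drops the `offset` low bytes of `prev_word` (they lie before `src`) and fills the top with the low bytes of `cur_word`. A hedged worked example, assuming a 64-bit little-endian build and using `u64` to stand in for `usize` (the byte values are arbitrary):

```rust
fn main() {
    const WORD_SIZE: usize = 8; // bytes per word, as in the 64-bit build
    let offset: usize = 3; // src % WORD_SIZE: src starts 3 bytes into an aligned word
    let shift = offset * 8;

    // Two consecutive aligned source words. In little-endian memory order,
    // prev_word's bytes are 88 77 66 55 44 33 22 11 and cur_word's are 00 ff ee ...
    let prev_word: u64 = 0x1122_3344_5566_7788;
    let cur_word: u64 = 0x99aa_bbcc_ddee_ff00;

    // Little-endian reassembly, exactly as in the loop body above.
    let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);

    // The result's memory image is 55 44 33 22 11 00 ff ee: the last 5 bytes
    // of prev_word followed by the first 3 bytes of cur_word.
    assert_eq!(reassembled, 0xeeff_0011_2233_4455);
}
```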
@@ -164,40 +233,54 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         }
     }
 
+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(not(feature = "mem-unaligned"))]
     #[inline(always)]
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(n > 0 && n % WORD_SIZE == 0);
+
         let mut dest_usize = dest as *mut usize;
-        let dest_start = dest.wrapping_sub(n) as *mut usize;
+        let dest_start = dest.wrapping_sub(n) as *mut usize; // we're moving towards the start
 
         // Calculate the misalignment offset and shift needed to reassemble value.
+        // Since `src` is definitely not aligned, `offset` is in the range 1..WORD_SIZE.
         let offset = src as usize & WORD_MASK;
         let shift = offset * 8;
 
-        // Realign src_aligned
-        let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // This will read (but won't use) bytes out of bound.
-        // cfg needed because not all targets will have atomic loads that can be lowered
-        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
-        #[cfg(target_has_atomic_load_store = "ptr")]
-        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
-        #[cfg(not(target_has_atomic_load_store = "ptr"))]
-        let mut prev_word = core::ptr::read_volatile(src_aligned);
+        // Realign src
+        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut prev_word = load_aligned_partial(src_aligned, offset);
 
-        while dest_start < dest_usize {
+        while dest_start.wrapping_add(1) < dest_usize {
             src_aligned = src_aligned.wrapping_sub(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
-            let resembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
+            let reassembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
             #[cfg(target_endian = "big")]
-            let resembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
+            let reassembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
             prev_word = cur_word;
 
             dest_usize = dest_usize.wrapping_sub(1);
-            *dest_usize = resembled;
+            *dest_usize = reassembled;
         }
+
+        // There's one more element left to go, and we can't use the loop for that as on the `src` side,
+        // it is partially out-of-bounds.
+        src_aligned = src_aligned.wrapping_sub(1);
+        let cur_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
+        #[cfg(target_endian = "little")]
+        let reassembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
+        #[cfg(target_endian = "big")]
+        let reassembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
+        // prev_word does not matter any more
+
+        dest_usize = dest_usize.wrapping_sub(1);
+        *dest_usize = reassembled;
     }
 
+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(feature = "mem-unaligned")]
     #[inline(always)]
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
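Finally, to see why both loops now stop one word early: the last aligned word overlapping the source region extends past `src + n`, so a full `usize` read of it is out-of-bounds, which is exactly what the deleted atomic/volatile loads papered over. A small address-arithmetic sketch under assumed values (hypothetical addresses, `WORD_SIZE == 8`):

```rust
fn main() {
    const WORD_SIZE: usize = 8;
    let src = 0x1003; // a misaligned source address
    let n = 16; // bytes to copy; a multiple of WORD_SIZE, as the callers guarantee

    let offset = src & (WORD_SIZE - 1); // 3, same as `src as usize & WORD_MASK`
    let src_aligned = src - offset; // 0x1000

    // The aligned words overlapping src..src+n start at 0x1000, 0x1008, 0x1010.
    // A full-word read of the last one would touch 0x1010..0x1018, but the
    // source buffer ends at 0x1013: five bytes out of bounds.
    let last_word = src_aligned + 2 * WORD_SIZE;
    assert_eq!(src + n, 0x1013);
    assert_eq!(last_word + WORD_SIZE, 0x1018);

    // load_aligned_partial(last_word, offset) instead reads only the `offset`
    // in-bounds bytes 0x1010..0x1013 and zero-fills the rest; symmetrically,
    // load_aligned_end_partial handles the first word for the backward copy.
    assert_eq!(last_word + offset, src + n);
}
```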