@@ -23,143 +23,261 @@ intrinsics! {
23
23
    /// AEABI helper: unsigned 32-bit division with remainder.
    ///
    /// Takes the numerator and denominator in `r0`/`r1` and forwards to
    /// `__udivmodsi4`, passing a stack slot as the remainder out-parameter;
    /// the remainder is then loaded back into `r1` before returning.
    ///
    /// # Safety
    ///
    /// Only callable as an AEABI runtime helper with arguments in the
    /// registers described above; not callable as a normal Rust function.
    #[naked]
    #[cfg(not(target_env = "msvc"))]
    pub unsafe extern "C" fn __aeabi_uidivmod() {
        unsafe {
            core::arch::naked_asm!(
                "push {{lr}}",
                // Reserve one word on the stack for the remainder out-parameter
                // and pass its address as the third argument (r2).
                "sub sp, sp, #4",
                "mov r2, sp",
                bl!("__udivmodsi4"),
                // Fetch the remainder into r1; the quotient is already in r0.
                "ldr r1, [sp]",
                "add sp, sp, #4",
                "pop {{pc}}",
            );
        }
    }
36
38
37
39
    /// AEABI helper: unsigned 64-bit division with remainder.
    ///
    /// Forwards to `__udivmoddi4`, passing the address of a 16-byte-aligned
    /// stack slot as the remainder out-parameter (on the stack, per the
    /// calling convention); the 64-bit remainder is then loaded into
    /// `r2:r3` before returning.
    ///
    /// # Safety
    ///
    /// Only callable as an AEABI runtime helper; not callable as a normal
    /// Rust function.
    #[naked]
    pub unsafe extern "C" fn __aeabi_uldivmod() {
        unsafe {
            core::arch::naked_asm!(
                "push {{r4, lr}}",
                // sp+8..sp+16 holds the u64 remainder out-parameter; its
                // address is stored at [sp] as the stacked argument.
                "sub sp, sp, #16",
                "add r4, sp, #8",
                "str r4, [sp]",
                bl!("__udivmoddi4"),
                // Load the remainder into r2:r3; the quotient is in r0:r1.
                "ldr r2, [sp, #8]",
                "ldr r3, [sp, #12]",
                "add sp, sp, #16",
                "pop {{r4, pc}}",
            );
        }
    }
51
55
52
56
    /// AEABI helper: signed 32-bit division with remainder.
    ///
    /// Calls `__aeabi_idiv` for the quotient and reconstructs the remainder
    /// as `numerator - quotient * denominator`.
    ///
    /// # Safety
    ///
    /// Only callable as an AEABI runtime helper; not callable as a normal
    /// Rust function.
    #[naked]
    pub unsafe extern "C" fn __aeabi_idivmod() {
        unsafe {
            core::arch::naked_asm!(
                // Save the original numerator (r0) and denominator (r1)
                // across the call.
                "push {{r0, r1, r4, lr}}",
                bl!("__aeabi_idiv"),
                // r1 = saved numerator, r2 = saved denominator.
                "pop {{r1, r2}}",
                // r1 = numerator - quotient * denominator (the remainder);
                // the quotient stays in r0.
                "muls r2, r2, r0",
                "subs r1, r1, r2",
                "pop {{r4, pc}}",
            );
        }
    }
63
69
64
70
    /// AEABI helper: signed 64-bit division with remainder.
    ///
    /// Forwards to `__divmoddi4`, passing the address of a stack slot as the
    /// remainder out-parameter (on the stack, per the calling convention);
    /// the 64-bit remainder is then loaded into `r2:r3` before returning.
    ///
    /// # Safety
    ///
    /// Only callable as an AEABI runtime helper; not callable as a normal
    /// Rust function.
    #[naked]
    pub unsafe extern "C" fn __aeabi_ldivmod() {
        unsafe {
            core::arch::naked_asm!(
                "push {{r4, lr}}",
                // sp+8..sp+16 holds the i64 remainder out-parameter; its
                // address is stored at [sp] as the stacked argument.
                "sub sp, sp, #16",
                "add r4, sp, #8",
                "str r4, [sp]",
                bl!("__divmoddi4"),
                // Load the remainder into r2:r3; the quotient is in r0:r1.
                "ldr r2, [sp, #8]",
                "ldr r3, [sp, #12]",
                "add sp, sp, #16",
                "pop {{r4, pc}}",
            );
        }
    }
78
86
79
    // FIXME(arm): The `*4` and `*8` variants should be defined as aliases.
80
88
89
    /// `memcpy` provided with the `aapcs` ABI.
    ///
    /// # Safety
    ///
    /// Usual `memcpy` requirements apply: `dst` and `src` must be valid for
    /// `n` bytes and must not overlap.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memcpy(dst: *mut u8, src: *const u8, n: usize) {
        // SAFETY: memcpy preconditions apply.
        unsafe { crate::mem::memcpy(dst, src, n) };
    }
85
99
100
+ /// `memcpy` for 4-byte alignment.
101
+ ///
102
+ /// # Safety
103
+ ///
104
+ /// Usual `memcpy` requirements apply. Additionally, `dest` and `src` must be aligned to
105
+ /// four bytes.
86
106
#[ cfg( not( target_vendor = "apple" ) ) ]
87
- pub unsafe extern "aapcs" fn __aeabi_memcpy4( dest : * mut u8 , src: * const u8 , n: usize ) {
107
+ pub unsafe extern "aapcs" fn __aeabi_memcpy4( dst : * mut u8 , src: * const u8 , n: usize ) {
88
108
// We are guaranteed 4-alignment, so accessing at u32 is okay.
89
- let mut dest = dest as * mut u32 ;
90
- let mut src = src as * mut u32 ;
109
+ let mut dst = dst. cast:: <u32 >( ) ;
110
+ let mut src = src. cast:: <u32 >( ) ;
111
+ debug_assert!( dst. is_aligned( ) ) ;
112
+ debug_assert!( src. is_aligned( ) ) ;
91
113
let mut n = n;
92
114
93
115
while n >= 4 {
94
- * dest = * src;
95
- dest = dest. offset( 1 ) ;
96
- src = src. offset( 1 ) ;
116
+ // SAFETY: `dst` and `src` are both valid for at least 4 bytes, from
117
+ // `memcpy` preconditions and the loop guard.
118
+ unsafe { * dst = * src } ;
119
+
120
+ // TODO
121
+ unsafe {
122
+ dst = dst. offset( 1 ) ;
123
+ src = src. offset( 1 ) ;
124
+ }
125
+
97
126
n -= 4 ;
98
127
}
99
128
100
- __aeabi_memcpy( dest as * mut u8 , src as * const u8 , n) ;
129
+ // SAFETY: `dst` and `src` will still be valid for `n` bytes
130
+ unsafe { __aeabi_memcpy( dst. cast:: <u8 >( ) , src. cast:: <u8 >( ) , n) } ;
101
131
}
102
132
133
    /// `memcpy` for 8-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memcpy` requirements apply. Additionally, `dst` and `src` must
    /// be aligned to eight bytes.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memcpy8(dst: *mut u8, src: *const u8, n: usize) {
        debug_assert!(dst.addr() & 7 == 0);
        debug_assert!(src.addr() & 7 == 0);

        // SAFETY: memcpy preconditions apply; 8-byte alignment satisfies the
        // 4-byte requirement of `__aeabi_memcpy4`.
        unsafe { __aeabi_memcpy4(dst, src, n) };
    }
107
147
148
    /// `memmove` provided with the `aapcs` ABI.
    ///
    /// # Safety
    ///
    /// Usual `memmove` requirements apply: `dst` and `src` must be valid for
    /// `n` bytes; the regions may overlap.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memmove(dst: *mut u8, src: *const u8, n: usize) {
        // SAFETY: memmove preconditions apply.
        unsafe { crate::mem::memmove(dst, src, n) };
    }
112
158
159
    /// `memmove` for 4-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must
    /// be aligned to four bytes.
    #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
    pub unsafe extern "aapcs" fn __aeabi_memmove4(dst: *mut u8, src: *const u8, n: usize) {
        debug_assert!(dst.addr() & 3 == 0);
        debug_assert!(src.addr() & 3 == 0);

        // SAFETY: same preconditions, less strict alignment.
        unsafe { __aeabi_memmove(dst, src, n) };
    }
117
173
174
    /// `memmove` for 8-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must
    /// be aligned to eight bytes.
    #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
    pub unsafe extern "aapcs" fn __aeabi_memmove8(dst: *mut u8, src: *const u8, n: usize) {
        debug_assert!(dst.addr() & 7 == 0);
        debug_assert!(src.addr() & 7 == 0);

        // SAFETY: memmove preconditions apply, less strict alignment.
        unsafe { __aeabi_memmove(dst, src, n) };
    }
122
188
189
    /// `memset` provided with the `aapcs` ABI.
    ///
    /// # Safety
    ///
    /// Usual `memset` requirements apply: `dst` must be valid for `n` bytes.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memset(dst: *mut u8, n: usize, c: i32) {
        // Note the different argument order: AEABI passes the length before
        // the fill value, while C `memset` takes the value first.
        // SAFETY: memset preconditions apply.
        unsafe { crate::mem::memset(dst, c, n) };
    }
128
200
201
+ /// `memset` for 4-byte alignment.
202
+ ///
203
+ /// # Safety
204
+ ///
205
+ /// Usual `memset` requirements apply. Additionally, `dest` and `src` must be aligned to
206
+ /// four bytes.
129
207
#[ cfg( not( target_vendor = "apple" ) ) ]
130
- pub unsafe extern "aapcs" fn __aeabi_memset4( dest: * mut u8 , n: usize , c: i32 ) {
131
- let mut dest = dest as * mut u32 ;
208
+ pub unsafe extern "aapcs" fn __aeabi_memset4( dst: * mut u8 , n: usize , c: i32 ) {
209
+ let mut dst = dst. cast:: <u32 >( ) ;
210
+ debug_assert!( dst. is_aligned( ) ) ;
132
211
let mut n = n;
133
212
134
213
let byte = ( c as u32 ) & 0xff ;
135
214
let c = ( byte << 24 ) | ( byte << 16 ) | ( byte << 8 ) | byte;
136
215
137
216
while n >= 4 {
138
- * dest = c;
139
- dest = dest. offset( 1 ) ;
217
+ // SAFETY: `dst` is valid for at least 4 bytes, from `memset` preconditions and
218
+ // the loop guard.
219
+ unsafe { * dst = c } ;
220
+ // TODO
221
+ unsafe {
222
+ dst = dst. offset( 1 ) ;
223
+ }
140
224
n -= 4 ;
141
225
}
142
226
143
- __aeabi_memset( dest as * mut u8 , n, byte as i32 ) ;
227
+ // SAFETY: `dst` will still be valid for `n` bytes
228
+ unsafe { __aeabi_memset( dst. cast:: <u8 >( ) , n, byte as i32 ) } ;
144
229
}
145
230
231
    /// `memset` for 8-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memset` requirements apply. Additionally, `dst` must be aligned
    /// to eight bytes.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memset8(dst: *mut u8, n: usize, c: i32) {
        debug_assert!(dst.addr() & 7 == 0);

        // SAFETY: memset preconditions apply; 8-byte alignment satisfies the
        // 4-byte requirement of `__aeabi_memset4`.
        unsafe { __aeabi_memset4(dst, n, c) };
    }
150
244
245
    /// `memclr` provided with the `aapcs` ABI.
    ///
    /// # Safety
    ///
    /// Usual `memclr` requirements apply: `dst` must be valid for `n` bytes.
    #[cfg(not(target_vendor = "apple"))]
    pub unsafe extern "aapcs" fn __aeabi_memclr(dst: *mut u8, n: usize) {
        // SAFETY: memclr preconditions apply; clearing is memset with 0.
        unsafe { __aeabi_memset(dst, n, 0) };
    }
155
255
256
    /// `memclr` for 4-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned
    /// to four bytes.
    #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
    pub unsafe extern "aapcs" fn __aeabi_memclr4(dst: *mut u8, n: usize) {
        debug_assert!(dst.addr() & 3 == 0);

        // SAFETY: memclr preconditions apply; our 4-byte alignment guarantee
        // matches the requirement of `__aeabi_memset4`.
        unsafe { __aeabi_memset4(dst, n, 0) };
    }
160
269
270
    /// `memclr` for 8-byte alignment.
    ///
    /// # Safety
    ///
    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned
    /// to eight bytes.
    #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
    pub unsafe extern "aapcs" fn __aeabi_memclr8(dst: *mut u8, n: usize) {
        debug_assert!(dst.addr() & 7 == 0);

        // SAFETY: memclr preconditions apply; 8-byte alignment satisfies the
        // 4-byte requirement of `__aeabi_memset4`.
        unsafe { __aeabi_memset4(dst, n, 0) };
    }
165
283
}
0 commit comments