@@ -55,7 +55,7 @@ fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
 }
 
 // Generic atomic read-modify-write operation
-unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
+unsafe fn atomic_rmw<T, F: Fn(u32) -> u32, G: Fn(u32, u32) -> u32>(ptr: *mut T, f: F, g: G) -> u32 {
     let aligned_ptr = align_ptr(ptr);
     let (shift, mask) = get_shift_mask(ptr);
 
@@ -65,7 +65,7 @@ unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
         let newval = f(curval);
         let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
         if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
-            return curval;
+            return g(curval, newval);
        }
    }
}
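The second closure `g` is what lets one retry loop serve both GCC return conventions: `__sync_fetch_and_<op>` returns the value from before the operation, while `__sync_<op>_and_fetch` returns the value after it. A minimal sketch of the selector idea, using a plain `AtomicU32` in place of the `__kuser_cmpxchg` kernel helper (the `rmw` name and the `main` harness are illustrative, not part of this patch):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Simplified model of the two-closure design: `f` computes the new value,
// `g` picks which value the caller gets back.
fn rmw(cell: &AtomicU32, f: impl Fn(u32) -> u32, g: impl Fn(u32, u32) -> u32) -> u32 {
    loop {
        let cur = cell.load(Ordering::Relaxed);
        let new = f(cur);
        if cell
            .compare_exchange(cur, new, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            // `g` selects the return value: |old, _| old gives
            // fetch-and-op semantics, |_, new| new gives op-and-fetch.
            return g(cur, new);
        }
    }
}

fn main() {
    let x = AtomicU32::new(5);
    assert_eq!(rmw(&x, |v| v + 1, |old, _| old), 5); // like __sync_fetch_and_add
    assert_eq!(rmw(&x, |v| v + 1, |_, new| new), 7); // like __sync_add_and_fetch
}
```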
@@ -89,13 +89,21 @@ unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
 }
 
 macro_rules! atomic_rmw {
-    ($name:ident, $ty:ty, $op:expr) => {
+    ($name:ident, $ty:ty, $op:expr, $fetch:expr) => {
         intrinsics! {
             pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
-                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
+                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32, |old, new| $fetch(old, new)) as $ty
            }
        }
    };
+
+    (@old $name:ident, $ty:ty, $op:expr) => {
+        atomic_rmw!($name, $ty, $op, |old, _| old);
+    };
+
+    (@new $name:ident, $ty:ty, $op:expr) => {
+        atomic_rmw!($name, $ty, $op, |_, new| new);
+    };
 }
 macro_rules! atomic_cmpxchg {
     ($name:ident, $ty:ty) => {
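The `@old` and `@new` arms are shorthand dispatchers: each forwards to the base arm with the selector pre-filled (`|old, _| old` or `|_, new| new`), so every intrinsic registration below states its return convention at the call site. A standalone sketch of the same `@`-token dispatch pattern (the `apply_op!` macro is hypothetical, not part of the crate):

```rust
// Base arm: applies `op` to a starting value and lets `sel` choose
// between the pre-op and post-op values.
macro_rules! apply_op {
    ($start:expr, $op:expr, $sel:expr) => {
        ($sel)($start, ($op)($start))
    };
    // `@old` forwards to the base arm with "return the pre-op value",
    // mirroring atomic_rmw!(@old ...).
    (@old $start:expr, $op:expr) => {
        apply_op!($start, $op, |old, _new| old)
    };
    // `@new` pre-fills "return the post-op value".
    (@new $start:expr, $op:expr) => {
        apply_op!($start, $op, |_old, new| new)
    };
}

fn main() {
    assert_eq!(apply_op!(@old 10u32, |x: u32| x + 1), 10);
    assert_eq!(apply_op!(@new 10u32, |x: u32| x + 1), 11);
}
```

Since `@` cannot start an expression, the `@`-prefixed inputs fall through the base arm and match the shorthand arms, which is what makes this dispatch style work.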
@@ -107,101 +115,129 @@ macro_rules! atomic_cmpxchg {
     };
 }
 
-atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
+atomic_rmw!(@old __sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
+atomic_rmw!(@old __sync_fetch_and_add_2, u16, |a: u16, b: u16| a
+    .wrapping_add(b));
+atomic_rmw!(@old __sync_fetch_and_add_4, u32, |a: u32, b: u32| a
+    .wrapping_add(b));
+
+atomic_rmw!(@new __sync_add_and_fetch_1, u8, |a: u8, b: u8| a.wrapping_add(b));
+atomic_rmw!(@new __sync_add_and_fetch_2, u16, |a: u16, b: u16| a
     .wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
+atomic_rmw!(@new __sync_add_and_fetch_4, u32, |a: u32, b: u32| a
     .wrapping_add(b));
 
-atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
+atomic_rmw!(@old __sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
+atomic_rmw!(@old __sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
     .wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
+atomic_rmw!(@old __sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
     .wrapping_sub(b));
 
-atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
-atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
-atomic_rmw!(__sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);
+atomic_rmw!(@new __sync_sub_and_fetch_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
+atomic_rmw!(@new __sync_sub_and_fetch_2, u16, |a: u16, b: u16| a
+    .wrapping_sub(b));
+atomic_rmw!(@new __sync_sub_and_fetch_4, u32, |a: u32, b: u32| a
+    .wrapping_sub(b));
+
+atomic_rmw!(@old __sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
+atomic_rmw!(@old __sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
+atomic_rmw!(@old __sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);
+
+atomic_rmw!(@new __sync_and_and_fetch_1, u8, |a: u8, b: u8| a & b);
+atomic_rmw!(@new __sync_and_and_fetch_2, u16, |a: u16, b: u16| a & b);
+atomic_rmw!(@new __sync_and_and_fetch_4, u32, |a: u32, b: u32| a & b);
+
+atomic_rmw!(@old __sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
+atomic_rmw!(@old __sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
+atomic_rmw!(@old __sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);
+
+atomic_rmw!(@new __sync_or_and_fetch_1, u8, |a: u8, b: u8| a | b);
+atomic_rmw!(@new __sync_or_and_fetch_2, u16, |a: u16, b: u16| a | b);
+atomic_rmw!(@new __sync_or_and_fetch_4, u32, |a: u32, b: u32| a | b);
+
+atomic_rmw!(@old __sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
+atomic_rmw!(@old __sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
+atomic_rmw!(@old __sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);
 
-atomic_rmw!(__sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
-atomic_rmw!(__sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
-atomic_rmw!(__sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);
+atomic_rmw!(@new __sync_xor_and_fetch_1, u8, |a: u8, b: u8| a ^ b);
+atomic_rmw!(@new __sync_xor_and_fetch_2, u16, |a: u16, b: u16| a ^ b);
+atomic_rmw!(@new __sync_xor_and_fetch_4, u32, |a: u32, b: u32| a ^ b);
 
-atomic_rmw!(__sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
-atomic_rmw!(__sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
-atomic_rmw!(__sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);
+atomic_rmw!(@old __sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
+atomic_rmw!(@old __sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
+atomic_rmw!(@old __sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
 
-atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
-atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
-atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
+atomic_rmw!(@new __sync_nand_and_fetch_1, u8, |a: u8, b: u8| !(a & b));
+atomic_rmw!(@new __sync_nand_and_fetch_2, u16, |a: u16, b: u16| !(a & b));
+atomic_rmw!(@new __sync_nand_and_fetch_4, u32, |a: u32, b: u32| !(a & b));
 
-atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
+atomic_rmw!(@old __sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
+atomic_rmw!(@old __sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
+atomic_rmw!(@old __sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
     a
 } else {
     b
 });
 
-atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
+atomic_rmw!(@old __sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
+atomic_rmw!(@old __sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
+atomic_rmw!(@old __sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
     a
 } else {
     b
 });
 
-atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+atomic_rmw!(@old __sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+atomic_rmw!(@old __sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+atomic_rmw!(@old __sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
     a
 } else {
     b
 });
 
-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+atomic_rmw!(@old __sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+atomic_rmw!(@old __sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
     a
 } else {
     b
 });
-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+atomic_rmw!(@old __sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
     a
 } else {
     b
 });
 
-atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
-atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
-atomic_rmw!(__sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);
+atomic_rmw!(@old __sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
+atomic_rmw!(@old __sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
+atomic_rmw!(@old __sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);
 
 atomic_cmpxchg!(__sync_val_compare_and_swap_1, u8);
 atomic_cmpxchg!(__sync_val_compare_and_swap_2, u16);
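Note the `__sync_lock_test_and_set` registrations above use `@old`: GCC defines this builtin as an atomic exchange that returns the previous contents, which is exactly what a test-and-set lock needs. Modeled with `std`'s `swap` (illustrative only; the real symbols are `extern "C"` and built only for ARM Linux targets):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// The op closure |_: u32, b: u32| b ignores the current value and stores
// the argument; the @old selector hands back what was there before.
fn main() {
    let lock = AtomicU32::new(0);
    let prev = lock.swap(1, Ordering::Acquire); // like __sync_lock_test_and_set_4(&lock, 1)
    assert_eq!(prev, 0); // 0 means the "lock" was free and we now hold it
}
```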