Skip to content

Commit d236e1c

Browse files
committed
non-temporal stores: document interaction with Rust memory model
1 parent a79a8de commit d236e1c

File tree

6 files changed

+181
-5
lines changed

6 files changed

+181
-5
lines changed

crates/core_arch/src/x86/avx.rs

+27
Original file line numberDiff line numberDiff line change
@@ -1694,6 +1694,15 @@ pub unsafe fn _mm256_lddqu_si256(mem_addr: *const __m256i) -> __m256i {
16941694
/// non-temporal (unlikely to be used again soon)
16951695
///
16961696
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_si256)
1697+
///
1698+
/// # Safety of non-temporal stores
1699+
///
1700+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
1701+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
1702+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
1703+
/// return.
1704+
///
1705+
/// See [`_mm_sfence`] for details.
16971706
#[inline]
16981707
#[target_feature(enable = "avx")]
16991708
#[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntdq
@@ -1707,6 +1716,15 @@ pub unsafe fn _mm256_stream_si256(mem_addr: *mut __m256i, a: __m256i) {
17071716
/// flagged as non-temporal (unlikely to be used again soon).
17081717
///
17091718
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_pd)
1719+
///
1720+
/// # Safety of non-temporal stores
1721+
///
1722+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
1723+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
1724+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
1725+
/// return.
1726+
///
1727+
/// See [`_mm_sfence`] for details.
17101728
#[inline]
17111729
#[target_feature(enable = "avx")]
17121730
#[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntpd
@@ -1722,6 +1740,15 @@ pub unsafe fn _mm256_stream_pd(mem_addr: *mut f64, a: __m256d) {
17221740
/// soon).
17231741
///
17241742
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_ps)
1743+
///
1744+
/// # Safety of non-temporal stores
1745+
///
1746+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
1747+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
1748+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
1749+
/// return.
1750+
///
1751+
/// See [`_mm_sfence`] for details.
17251752
#[inline]
17261753
#[target_feature(enable = "avx")]
17271754
#[cfg_attr(test, assert_instr(vmovntps))]

crates/core_arch/src/x86/avx512f.rs

+27
Original file line numberDiff line numberDiff line change
@@ -27999,6 +27999,15 @@ pub unsafe fn _mm_mask_testn_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) ->
2799927999
/// Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
2800028000
///
2800128001
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_ps&expand=5671)
28002+
///
28003+
/// # Safety of non-temporal stores
28004+
///
28005+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
28006+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
28007+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
28008+
/// return.
28009+
///
28010+
/// See [`_mm_sfence`] for details.
2800228011
#[inline]
2800328012
#[target_feature(enable = "avx512f")]
2800428013
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
@@ -28011,6 +28020,15 @@ pub unsafe fn _mm512_stream_ps(mem_addr: *mut f32, a: __m512) {
2801128020
/// Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
2801228021
///
2801328022
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_pd&expand=5667)
28023+
///
28024+
/// # Safety of non-temporal stores
28025+
///
28026+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
28027+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
28028+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
28029+
/// return.
28030+
///
28031+
/// See [`_mm_sfence`] for details.
2801428032
#[inline]
2801528033
#[target_feature(enable = "avx512f")]
2801628034
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
@@ -28023,6 +28041,15 @@ pub unsafe fn _mm512_stream_pd(mem_addr: *mut f64, a: __m512d) {
2802328041
/// Store 512-bits of integer data from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
2802428042
///
2802528043
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_si512&expand=5675)
28044+
///
28045+
/// # Safety of non-temporal stores
28046+
///
28047+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
28048+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
28049+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
28050+
/// return.
28051+
///
28052+
/// See [`_mm_sfence`] for details.
2802628053
#[inline]
2802728054
#[target_feature(enable = "avx512f")]
2802828055
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]

crates/core_arch/src/x86/sse.rs

+73-5
Original file line numberDiff line numberDiff line change
@@ -1348,14 +1348,73 @@ pub unsafe fn _mm_move_ss(a: __m128, b: __m128) -> __m128 {
13481348
simd_shuffle!(a, b, [4, 1, 2, 3])
13491349
}
13501350

1351-
/// Performs a serializing operation on all store-to-memory instructions that
1352-
/// were issued prior to this instruction.
1351+
/// Performs a serializing operation on all non-temporal ("streaming") store instructions that
1352+
/// were issued by the current thread prior to this instruction.
13531353
///
1354-
/// Guarantees that every store instruction that precedes, in program order, is
1355-
/// globally visible before any store instruction which follows the fence in
1356-
/// program order.
1354+
/// Guarantees that every non-temporal store instruction that precedes this fence, in program order, is
1355+
/// ordered before any load or store instruction which follows the fence in
1356+
/// synchronization order.
13571357
///
13581358
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence)
1359+
/// (but note that Intel is only documenting the hardware-level concerns related to this
1360+
/// instruction; the Intel documentation does not take into account the extra concerns that arise
1361+
/// because the Rust memory model is different from the x86 memory model.)
1362+
///
1363+
/// # Safety of non-temporal stores
1364+
///
1365+
/// After using any non-temporal store intrinsic, but before any other access to the memory that the
1366+
/// intrinsic mutates, a call to `_mm_sfence` must be performed on the thread that used the
1367+
/// intrinsic.
1368+
///
1369+
/// Non-temporal stores behave very differently from regular stores. For the purpose of the Rust
1370+
/// memory model, these stores are happening asynchronously in a background thread. This means a
1371+
/// non-temporal store can cause data races with other accesses, even other accesses on the same
1372+
/// thread. It also means that cross-thread synchronization does not work as expected: let's say the
1373+
/// intrinsic is called on thread T1, and T1 performs synchronization with some other thread T2. The
1374+
/// non-temporal store acts as if it happened not in T1 but in a different thread T3, and T2 has not
1375+
/// synchronized with T3! Calling `_mm_sfence` makes the current thread wait for and synchronize
1376+
/// with all the non-temporal stores previously started on this thread, which means in particular
1377+
/// that subsequent synchronization with other threads will then work as intended again.
1378+
///
1379+
/// The general pattern to use non-temporal stores correctly is to call `_mm_sfence` before your
1380+
/// code jumps back to code outside your library. This ensures all stores inside your function
1381+
/// are synchronized-before the return, and thus transitively synchronized-before everything
1382+
/// the caller does after your function returns.
1383+
//
1384+
// The following is not a doc comment since it's not clear whether we want to put this into the
1385+
// docs, but it should be written out somewhere.
1386+
//
1387+
// Formally, we consider non-temporal stores and sfences to be opaque blobs that the compiler cannot
1388+
// inspect, and that behave like the following functions. This explains where the docs above come
1389+
// from.
1390+
// ```
1391+
// #[thread_local]
1392+
// static mut PENDING_NONTEMP_WRITES: AtomicUsize = AtomicUsize::new(0);
1393+
//
1394+
// pub unsafe fn nontemporal_store<T>(ptr: *mut T, val: T) {
1395+
// PENDING_NONTEMP_WRITES.fetch_add(1, Relaxed);
1396+
// // Spawn a thread that will eventually do our write.
1397+
// // We need to fetch a pointer to this thread's pending-write
1398+
// // counter, so that we can access it from the background thread.
1399+
// let pending_writes = addr_of!(PENDING_NONTEMP_WRITES);
1400+
// // If this was actual Rust code we'd have to do some extra work
1401+
// // because `ptr`, `val`, `pending_writes` are all `!Send`. We skip that here.
1402+
// std::thread::spawn(move || {
1403+
// // Do the write in the background thread.
1404+
// ptr.write(val);
1405+
// // Register the write as done. Crucially, this is `Release`, so it
1406+
// // syncs-with the `Acquire` in `sfence`.
1407+
// (&*pending_writes).fetch_sub(1, Release);
1408+
// });
1409+
// }
1410+
//
1411+
// pub fn sfence() {
1412+
// unsafe {
1413+
// // Wait until there are no more pending writes.
1414+
// while PENDING_NONTEMP_WRITES.load(Acquire) > 0 {}
1415+
// }
1416+
// }
1417+
// ```
13591418
#[inline]
13601419
#[target_feature(enable = "sse")]
13611420
#[cfg_attr(test, assert_instr(sfence))]
@@ -1938,6 +1997,15 @@ extern "C" {
19381997
/// exception _may_ be generated.
19391998
///
19401999
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps)
2000+
///
2001+
/// # Safety of non-temporal stores
2002+
///
2003+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
2004+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
2005+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
2006+
/// return.
2007+
///
2008+
/// See [`_mm_sfence`] for details.
19412009
#[inline]
19422010
#[target_feature(enable = "sse")]
19432011
#[cfg_attr(test, assert_instr(movntps))]

crates/core_arch/src/x86/sse2.rs

+27
Original file line numberDiff line numberDiff line change
@@ -1317,6 +1317,15 @@ pub unsafe fn _mm_storel_epi64(mem_addr: *mut __m128i, a: __m128i) {
13171317
/// used again soon).
13181318
///
13191319
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si128)
1320+
///
1321+
/// # Safety of non-temporal stores
1322+
///
1323+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
1324+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
1325+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
1326+
/// return.
1327+
///
1328+
/// See [`_mm_sfence`] for details.
13201329
#[inline]
13211330
#[target_feature(enable = "sse2")]
13221331
#[cfg_attr(test, assert_instr(movntps))] // FIXME movntdq
@@ -1330,6 +1339,15 @@ pub unsafe fn _mm_stream_si128(mem_addr: *mut __m128i, a: __m128i) {
13301339
/// used again soon).
13311340
///
13321341
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32)
1342+
///
1343+
/// # Safety of non-temporal stores
1344+
///
1345+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
1346+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
1347+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
1348+
/// return.
1349+
///
1350+
/// See [`_mm_sfence`] for details.
13331351
#[inline]
13341352
#[target_feature(enable = "sse2")]
13351353
#[cfg_attr(test, assert_instr(movnti))]
@@ -2515,6 +2533,15 @@ pub unsafe fn _mm_loadl_pd(a: __m128d, mem_addr: *const f64) -> __m128d {
25152533
/// used again soon).
25162534
///
25172535
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd)
2536+
///
2537+
/// # Safety of non-temporal stores
2538+
///
2539+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
2540+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
2541+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
2542+
/// return.
2543+
///
2544+
/// See [`_mm_sfence`] for details.
25182545
#[inline]
25192546
#[target_feature(enable = "sse2")]
25202547
#[cfg_attr(test, assert_instr(movntps))] // FIXME movntpd

crates/core_arch/src/x86/sse4a.rs

+18
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,15 @@ pub unsafe fn _mm_insert_si64(x: __m128i, y: __m128i) -> __m128i {
6262
/// Non-temporal store of `a.0` into `p`.
6363
///
6464
/// Writes 64-bit data to a memory location without polluting the caches.
65+
///
66+
/// # Safety of non-temporal stores
67+
///
68+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
69+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
70+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
71+
/// return.
72+
///
73+
/// See [`_mm_sfence`] for details.
6574
#[inline]
6675
#[target_feature(enable = "sse4a")]
6776
#[cfg_attr(test, assert_instr(movntsd))]
@@ -73,6 +82,15 @@ pub unsafe fn _mm_stream_sd(p: *mut f64, a: __m128d) {
7382
/// Non-temporal store of `a.0` into `p`.
7483
///
7584
/// Writes 32-bit data to a memory location without polluting the caches.
85+
///
86+
/// # Safety of non-temporal stores
87+
///
88+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
89+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
90+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
91+
/// return.
92+
///
93+
/// See [`_mm_sfence`] for details.
7694
#[inline]
7795
#[target_feature(enable = "sse4a")]
7896
#[cfg_attr(test, assert_instr(movntss))]

crates/core_arch/src/x86_64/sse2.rs

+9
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,15 @@ pub unsafe fn _mm_cvttsd_si64x(a: __m128d) -> i64 {
6767
/// used again soon).
6868
///
6969
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64)
70+
///
71+
/// # Safety of non-temporal stores
72+
///
73+
/// After using this intrinsic, but before any other access to the memory that this intrinsic
74+
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
75+
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
76+
/// return.
77+
///
78+
/// See [`_mm_sfence`] for details.
7079
#[inline]
7180
#[target_feature(enable = "sse2")]
7281
#[cfg_attr(test, assert_instr(movnti))]

0 commit comments

Comments
 (0)