### Volatile access <a id="depr.atomics.volatile">[[depr.atomics.volatile]]</a>

If an atomic specialization has one of the following overloads, then
that overload participates in overload resolution even if
`atomic<T>::is_always_lock_free` is `false`:

``` cpp
void store(T desired, memory_order order = memory_order::seq_cst) volatile noexcept;
T operator=(T desired) volatile noexcept;
T load(memory_order order = memory_order::seq_cst) const volatile noexcept;
operator T() const volatile noexcept;
T exchange(T desired, memory_order order = memory_order::seq_cst) volatile noexcept;
bool compare_exchange_weak(T& expected, T desired,
                           memory_order success, memory_order failure) volatile noexcept;
bool compare_exchange_strong(T& expected, T desired,
                             memory_order success, memory_order failure) volatile noexcept;
bool compare_exchange_weak(T& expected, T desired,
                           memory_order order = memory_order::seq_cst) volatile noexcept;
bool compare_exchange_strong(T& expected, T desired,
                             memory_order order = memory_order::seq_cst) volatile noexcept;
T fetch_key(T operand, memory_order order = memory_order::seq_cst) volatile noexcept;
T operator op=(T operand) volatile noexcept;
T* fetch_key(ptrdiff_t operand, memory_order order = memory_order::seq_cst) volatile noexcept;
```
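The distinction matters when these overloads are invoked through a `volatile std::atomic` whose specialization is not always lock-free. The sketch below is not part of the standard's text; `Wide` is a hypothetical 16-byte type, and both whether `std::atomic<Wide>::is_always_lock_free` is `true` and whether an implementation still lets the volatile overloads participate when it is `false` are implementation-specific.

``` cpp
#include <atomic>
#include <cstdint>

// Hypothetical payload type; on many targets std::atomic<Wide> is not
// always lock-free because it is wider than the natively atomic word size.
struct Wide { std::uint64_t lo, hi; };

volatile std::atomic<int>  counter{0};  // int is always lock-free on mainstream targets
volatile std::atomic<Wide> shared{};    // may not be always lock-free

int main() {
    // Not deprecated: atomic<int>::is_always_lock_free is true here, so the
    // volatile store/load overloads participate in overload resolution normally.
    counter.store(1);
    int c = counter.load();

    // If atomic<Wide>::is_always_lock_free is false, these calls can only
    // select the deprecated volatile overloads listed above; a conforming
    // implementation may reject them or emit a deprecation diagnostic.
    shared.store(Wide{1, 2});
    Wide w = shared.load();

    (void)c;
    (void)w;
}
```

Generic code can avoid depending on the deprecated overloads by checking `is_always_lock_free` (for example, behind `if constexpr`) before going through a volatile-qualified atomic.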