From Jason Turner

[atomics]

Files changed (1):

  1. tmp/tmpb93mdo86/{from.md → to.md} (renamed) +1884 -378
@@ -4,59 +4,64 @@

 This Clause describes components for fine-grained atomic access. This
 access is provided via operations on atomic objects.

 The following subclauses describe atomics requirements and components
- for types and operations, as summarized below.

- **Table: Atomics library summary** <a id="tab:atomics.lib.summary">[tab:atomics.lib.summary]</a>

 | Subclause | | Header |
- | ---------------------------- | -------------------------- | ---------- |
- | [[atomics.order]] | Order and Consistency | |
- | [[atomics.lockfree]] | Lock-free Property | |
- | [[atomics.types.generic]] | Atomic Types | `<atomic>` |
- | [[atomics.types.operations]] | Operations on Atomic Types | |
- | [[atomics.flag]] | Flag Type and Operations | |
 | [[atomics.fences]] | Fences | |


 ## Header `<atomic>` synopsis <a id="atomics.syn">[[atomics.syn]]</a>

 ``` cpp
 namespace std {
 // [atomics.order], order and consistency
- enum memory_order;
 template<class T>
 T kill_dependency(T y) noexcept;

 // [atomics.lockfree], lock-free property
 #define ATOMIC_BOOL_LOCK_FREE unspecified
 #define ATOMIC_CHAR_LOCK_FREE unspecified
 #define ATOMIC_CHAR16_T_LOCK_FREE unspecified
 #define ATOMIC_CHAR32_T_LOCK_FREE unspecified
 #define ATOMIC_WCHAR_T_LOCK_FREE unspecified
 #define ATOMIC_SHORT_LOCK_FREE unspecified
 #define ATOMIC_INT_LOCK_FREE unspecified
 #define ATOMIC_LONG_LOCK_FREE unspecified
 #define ATOMIC_LLONG_LOCK_FREE unspecified
 #define ATOMIC_POINTER_LOCK_FREE unspecified

- // [atomics.types.generic], atomic
 template<class T> struct atomic;
 // [atomics.types.pointer], partial specialization for pointers
 template<class T> struct atomic<T*>;

 // [atomics.nonmembers], non-member functions
 template<class T>
 bool atomic_is_lock_free(const volatile atomic<T>*) noexcept;
 template<class T>
 bool atomic_is_lock_free(const atomic<T>*) noexcept;
- template<class T>
- void atomic_init(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
- template<class T>
- void atomic_init(atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 void atomic_store(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 void atomic_store(atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
@@ -72,11 +77,11 @@ namespace std {
 template<class T>
 T atomic_load_explicit(const volatile atomic<T>*, memory_order) noexcept;
 template<class T>
 T atomic_load_explicit(const atomic<T>*, memory_order) noexcept;
 template<class T>
- T atomic_exchange(volatile atomic<T>*, T) noexcept;
 template<class T>
 T atomic_exchange(atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 T atomic_exchange_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
 memory_order) noexcept;
@@ -169,12 +174,28 @@ namespace std {
 memory_order) noexcept;
 template<class T>
 T atomic_fetch_xor_explicit(atomic<T>*, typename atomic<T>::value_type,
 memory_order) noexcept;

- // [atomics.types.operations], initialization
- #define ATOMIC_VAR_INIT(value) see below

 // [atomics.alias], type aliases
 using atomic_bool = atomic<bool>;
 using atomic_char = atomic<char>;
 using atomic_schar = atomic<signed char>;
@@ -185,10 +206,11 @@ namespace std {
 using atomic_uint = atomic<unsigned int>;
 using atomic_long = atomic<long>;
 using atomic_ulong = atomic<unsigned long>;
 using atomic_llong = atomic<long long>;
 using atomic_ullong = atomic<unsigned long long>;
 using atomic_char16_t = atomic<char16_t>;
 using atomic_char32_t = atomic<char32_t>;
 using atomic_wchar_t = atomic<wchar_t>;

 using atomic_int8_t = atomic<int8_t>;
@@ -223,21 +245,39 @@ namespace std {
 using atomic_size_t = atomic<size_t>;
 using atomic_ptrdiff_t = atomic<ptrdiff_t>;
 using atomic_intmax_t = atomic<intmax_t>;
 using atomic_uintmax_t = atomic<uintmax_t>;

 // [atomics.flag], flag type and operations
 struct atomic_flag;
 bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
 bool atomic_flag_test_and_set(atomic_flag*) noexcept;
 bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
 bool atomic_flag_test_and_set_explicit(atomic_flag*, memory_order) noexcept;
 void atomic_flag_clear(volatile atomic_flag*) noexcept;
 void atomic_flag_clear(atomic_flag*) noexcept;
 void atomic_flag_clear_explicit(volatile atomic_flag*, memory_order) noexcept;
 void atomic_flag_clear_explicit(atomic_flag*, memory_order) noexcept;
- #define ATOMIC_FLAG_INIT see below

 // [atomics.fences], fences
 extern "C" void atomic_thread_fence(memory_order) noexcept;
 extern "C" void atomic_signal_fence(memory_order) noexcept;
 }
@@ -247,149 +287,158 @@ namespace std {

 The type aliases `atomic_intN_t`, `atomic_uintN_t`, `atomic_intptr_t`,
 and `atomic_uintptr_t` are defined if and only if `intN_t`, `uintN_t`,
 `intptr_t`, and `uintptr_t` are defined, respectively.

 ## Order and consistency <a id="atomics.order">[[atomics.order]]</a>

 ``` cpp
 namespace std {
- enum memory_order {
- memory_order_relaxed, memory_order_consume, memory_order_acquire,
- memory_order_release, memory_order_acq_rel, memory_order_seq_cst
 };
 }
 ```

 The enumeration `memory_order` specifies the detailed regular
 (non-atomic) memory synchronization order as defined in
 [[intro.multithread]] and may provide for operation ordering. Its
 enumerated values and their meanings are as follows:

- - `memory_order_relaxed`: no operation orders memory.
- - `memory_order_release`, `memory_order_acq_rel`, and
- `memory_order_seq_cst`: a store operation performs a release operation
- on the affected memory location.
- - `memory_order_consume`: a load operation performs a consume operation
 on the affected memory location. \[*Note 1*: Prefer
- `memory_order_acquire`, which provides stronger guarantees than
- `memory_order_consume`. Implementations have found it infeasible to
- provide performance better than that of `memory_order_acquire`.
 Specification revisions are under consideration. — *end note*]
- - `memory_order_acquire`, `memory_order_acq_rel`, and
- `memory_order_seq_cst`: a load operation performs an acquire operation
- on the affected memory location.

- [*Note 2*: Atomic operations specifying `memory_order_relaxed` are
 relaxed with respect to memory ordering. Implementations must still
 guarantee that any given atomic access to a particular atomic object be
 indivisible with respect to all other atomic accesses to that
 object. — *end note*]
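
*Editorial aside, not part of the diffed text:* the store-release/load-acquire pairing described in the list above is easiest to see in a two-thread sketch. All names here (`ready`, `payload`) are illustrative.

``` cpp
#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> ready{false};
int payload = 0;  // plain, non-atomic data

void producer() {
  payload = 42;                                  // written before the release store
  ready.store(true, std::memory_order_release);  // store performs a release operation
}

void consumer() {
  while (!ready.load(std::memory_order_acquire))  // load performs an acquire operation
    ;                                             // spin until the store is observed
  assert(payload == 42);  // the release store synchronizes with the acquire load
}

int main() {
  std::thread t1(producer), t2(consumer);
  t1.join();
  t2.join();
}
```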

- An atomic operation *A* that performs a release operation on an atomic
- object *M* synchronizes with an atomic operation *B* that performs an
- acquire operation on *M* and takes its value from any side effect in the
- release sequence headed by *A*.
-
- There shall be a single total order *S* on all `memory_order_seq_cst`
- operations, consistent with the “happens before” order and modification
- orders for all affected locations, such that each `memory_order_seq_cst`
- operation *B* that loads a value from an atomic object *M* observes one
- of the following values:
-
- - the result of the last modification *A* of *M* that precedes *B* in
- *S*, if it exists, or
- - if *A* exists, the result of some modification of *M* that is not
- `memory_order_seq_cst` and that does not happen before *A*, or
- - if *A* does not exist, the result of some modification of *M* that is
- not `memory_order_seq_cst`.
-
- [*Note 3*: Although it is not explicitly required that *S* include
- locks, it can always be extended to an order that does include lock and
- unlock operations, since the ordering between those is already included
- in the “happens before” ordering. *end note*]
-
- For an atomic operation *B* that reads the value of an atomic object
- *M*, if there is a `memory_order_seq_cst` fence *X* sequenced before
- *B*, then *B* observes either the last `memory_order_seq_cst`
- modification of *M* preceding *X* in the total order *S* or a later
- modification of *M* in its modification order.
-
- For atomic operations *A* and *B* on an atomic object *M*, where *A*
- modifies *M* and *B* takes its value, if there is a
- `memory_order_seq_cst` fence *X* such that *A* is sequenced before *X*
- and *B* follows *X* in *S*, then *B* observes either the effects of *A*
- or a later modification of *M* in its modification order.
-
- For atomic operations *A* and *B* on an atomic object *M*, where *A*
- modifies *M* and *B* takes its value, if there are
- `memory_order_seq_cst` fences *X* and *Y* such that *A* is sequenced
- before *X*, *Y* is sequenced before *B*, and *X* precedes *Y* in *S*,
- then *B* observes either the effects of *A* or a later modification of
- *M* in its modification order.
-
- For atomic modifications *A* and *B* of an atomic object *M*, *B* occurs
- later than *A* in the modification order of *M* if:
-
- - there is a `memory_order_seq_cst` fence *X* such that *A* is sequenced
- before *X*, and *X* precedes *B* in *S*, or
- - there is a `memory_order_seq_cst` fence *Y* such that *Y* is sequenced
- before *B*, and *A* precedes *Y* in *S*, or
- - there are `memory_order_seq_cst` fences *X* and *Y* such that *A* is
- sequenced before *X*, *Y* is sequenced before *B*, and *X* precedes
- *Y* in *S*.
-
- [*Note 4*: `memory_order_seq_cst` ensures sequential consistency only
 for a program that is free of data races and uses exclusively
- `memory_order_seq_cst` operations. Any use of weaker ordering will
- invalidate this guarantee unless extreme care is used. In particular,
- `memory_order_seq_cst` fences ensure a total order only for the fences
- themselves. Fences cannot, in general, be used to restore sequential
- consistency for atomic operations with weaker ordering
- specifications. — *end note*]
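
*Editorial aside, not part of the diffed text:* the single total order *S* is what rules out the classic "store buffering" outcome. A minimal sketch (names illustrative):

``` cpp
#include <atomic>
#include <thread>

std::atomic<int> x{0}, y{0};
int r1, r2;

void thread1() {
  x.store(1, std::memory_order_seq_cst);
  r1 = y.load(std::memory_order_seq_cst);
}

void thread2() {
  y.store(1, std::memory_order_seq_cst);
  r2 = x.load(std::memory_order_seq_cst);
}

int main() {
  std::thread a(thread1), b(thread2);
  a.join();
  b.join();
  // Under the single total order S, one of the two stores is first, so the
  // other thread's load must observe it: r1 == 0 && r2 == 0 is impossible.
  // With acquire/release or relaxed ordering instead, both loads may read 0.
}
```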

 Implementations should ensure that no “out-of-thin-air” values are
 computed that circularly depend on their own computation.

- [*Note 5*:

 For example, with `x` and `y` initially zero,

 ``` cpp
 // Thread 1:
- r1 = y.load(memory_order_relaxed);
- x.store(r1, memory_order_relaxed);
 ```

 ``` cpp
 // Thread 2:
- r2 = x.load(memory_order_relaxed);
- y.store(r2, memory_order_relaxed);
 ```

 should not produce `r1 == r2 == 42`, since the store of 42 to `y` is
 only possible if the store to `x` stores `42`, which circularly depends
 on the store to `y` storing `42`. Note that without this restriction,
 such an execution is possible.

 — *end note*]

- [*Note 6*:

 The recommendation similarly disallows `r1 == r2 == 42` in the following
 example, with `x` and `y` again initially zero:

 ``` cpp
 // Thread 1:
- r1 = x.load(memory_order_relaxed);
- if (r1 == 42) y.store(42, memory_order_relaxed);
 ```

 ``` cpp
 // Thread 2:
- r2 = y.load(memory_order_relaxed);
- if (r2 == 42) x.store(42, memory_order_relaxed);
 ```

 — *end note*]

 Atomic read-modify-write operations shall always read the last value (in
@@ -403,19 +452,20 @@ a reasonable amount of time.
 template<class T>
 T kill_dependency(T y) noexcept;
 ```

 *Effects:* The argument does not carry a dependency to the return
- value ([[intro.multithread]]).

 *Returns:* `y`.
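
*Editorial aside, not part of the diffed text:* a rough sketch of where `kill_dependency` fits, assuming a `memory_order_consume` load (which, per Note 1 above, implementations effectively treat as acquire anyway); names are illustrative.

``` cpp
#include <atomic>

std::atomic<int*> p;
int table[2] = {10, 20};

int reader() {
  int* q = p.load(std::memory_order_consume);
  int a = *q;  // *q carries a dependency from the load, so it is ordered
  // kill_dependency ends the dependency chain: the value is returned, but
  // the following indexing no longer carries a dependency from the load.
  return table[std::kill_dependency(a & 1)];
}
```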

 ## Lock-free property <a id="atomics.lockfree">[[atomics.lockfree]]</a>

 ``` cpp
 #define ATOMIC_BOOL_LOCK_FREE unspecified
 #define ATOMIC_CHAR_LOCK_FREE unspecified
 #define ATOMIC_CHAR16_T_LOCK_FREE unspecified
 #define ATOMIC_CHAR32_T_LOCK_FREE unspecified
 #define ATOMIC_WCHAR_T_LOCK_FREE unspecified
 #define ATOMIC_SHORT_LOCK_FREE unspecified
 #define ATOMIC_INT_LOCK_FREE unspecified
@@ -430,134 +480,747 @@ grouped together. The properties also apply to the corresponding
 (partial) specializations of the `atomic` template. A value of 0
 indicates that the types are never lock-free. A value of 1 indicates
 that the types are sometimes lock-free. A value of 2 indicates that the
 types are always lock-free.

- The function `atomic_is_lock_free` ([[atomics.types.operations]])
 indicates whether the object is lock-free. In any given program
 execution, the result of the lock-free query shall be consistent for all
 pointers of the same type.

 Atomic operations that are not lock-free are considered to potentially
- block ([[intro.progress]]).

- [*Note 1*: Operations that are lock-free should also be address-free.
 That is, atomic operations on the same memory location via two different
 addresses will communicate atomically. The implementation should not
 depend on any per-process state. This restriction enables communication
 by memory that is mapped into a process more than once and by memory
 that is shared between two processes. — *end note*]
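
*Editorial aside, not part of the diffed text:* both the compile-time macros and the per-object query can be inspected directly.

``` cpp
#include <atomic>
#include <cstdio>

int main() {
  // Compile-time hints: 0 = never, 1 = sometimes, 2 = always lock-free.
  std::printf("ATOMIC_INT_LOCK_FREE     = %d\n", ATOMIC_INT_LOCK_FREE);
  std::printf("ATOMIC_POINTER_LOCK_FREE = %d\n", ATOMIC_POINTER_LOCK_FREE);

  // Per-object query; the answer is consistent for all objects of the
  // same type within one program execution.
  std::atomic<long long> v{0};
  std::printf("atomic<long long> is lock-free: %d\n", (int)v.is_lock_free());
}
```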
 ## Class template `atomic` <a id="atomics.types.generic">[[atomics.types.generic]]</a>

 ``` cpp
 namespace std {
 template<class T> struct atomic {
 using value_type = T;
 static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
 bool is_lock_free() const volatile noexcept;
 bool is_lock_free() const noexcept;
- void store(T, memory_order = memory_order_seq_cst) volatile noexcept;
- void store(T, memory_order = memory_order_seq_cst) noexcept;
- T load(memory_order = memory_order_seq_cst) const volatile noexcept;
- T load(memory_order = memory_order_seq_cst) const noexcept;
 operator T() const volatile noexcept;
 operator T() const noexcept;
- T exchange(T, memory_order = memory_order_seq_cst) volatile noexcept;
- T exchange(T, memory_order = memory_order_seq_cst) noexcept;
 bool compare_exchange_weak(T&, T, memory_order, memory_order) volatile noexcept;
 bool compare_exchange_weak(T&, T, memory_order, memory_order) noexcept;
 bool compare_exchange_strong(T&, T, memory_order, memory_order) volatile noexcept;
 bool compare_exchange_strong(T&, T, memory_order, memory_order) noexcept;
- bool compare_exchange_weak(T&, T, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_weak(T&, T, memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_strong(T&, T, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_strong(T&, T, memory_order = memory_order_seq_cst) noexcept;

- atomic() noexcept = default;
- constexpr atomic(T) noexcept;
- atomic(const atomic&) = delete;
- atomic& operator=(const atomic&) = delete;
- atomic& operator=(const atomic&) volatile = delete;
- T operator=(T) volatile noexcept;
- T operator=(T) noexcept;
 };
 }
 ```

- The template argument for `T` shall be trivially copyable (
- [[basic.types]]).

 [*Note 1*: Type arguments that are not also statically initializable
 may be difficult to use. — *end note*]

 The specialization `atomic<bool>` is a standard-layout struct.

 [*Note 2*: The representation of an atomic specialization need not have
- the same size as its corresponding argument type. Specializations should
- have the same size whenever possible, as this reduces the effort
- required to port existing code. — *end note*]
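
*Editorial aside, not part of the diffed text:* the trivially-copyable requirement and the size caveat of Note 2 can both be demonstrated; `Coord` is an illustrative type.

``` cpp
#include <atomic>
#include <string>
#include <type_traits>

struct Coord { int x; int y; };  // trivially copyable aggregate
static_assert(std::is_trivially_copyable_v<Coord>, "required by std::atomic");

std::atomic<Coord> pos{Coord{0, 0}};  // OK: Coord satisfies the requirement
// std::atomic<std::string> s;        // ill-formed: not trivially copyable

// As Note 2 says, sizeof(std::atomic<Coord>) may exceed sizeof(Coord),
// and a non-lock-free specialization may use an internal lock.
```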

 ### Operations on atomic types <a id="atomics.types.operations">[[atomics.types.operations]]</a>

- [*Note 1*: Many operations are volatile-qualified. The “volatile as
- device register” semantics have not changed in the standard. This
- qualification means that volatility is preserved when applying these
- operations to volatile objects. It does not mean that operations on
- non-volatile objects become volatile. — *end note*]
-
 ``` cpp
- atomic() noexcept = default;
 ```

- *Effects:* Leaves the atomic object in an uninitialized state.

- [*Note 1*: These semantics ensure compatibility with C. *end note*]

 ``` cpp
 constexpr atomic(T desired) noexcept;
 ```

 *Effects:* Initializes the object with the value `desired`.
- Initialization is not an atomic operation ([[intro.multithread]]).

- [*Note 2*: It is possible to have an access to an atomic object `A`
 race with its construction, for example by communicating the address of
 the just-constructed object `A` to another thread via
- `memory_order_relaxed` operations on a suitable atomic pointer variable,
- and then immediately accessing `A` in the receiving thread. This results
- in undefined behavior. — *end note*]
-
- ``` cpp
- #define ATOMIC_VAR_INIT(value) see below
- ```
-
- The macro expands to a token sequence suitable for constant
- initialization of an atomic variable of static storage duration of a
- type that is initialization-compatible with `value`.
-
- [*Note 3*: This operation may need to initialize locks. — *end note*]
-
- Concurrent access to the variable being initialized, even via an atomic
- operation, constitutes a data race.
-
- [*Example 1*:
-
- ``` cpp
- atomic<int> v = ATOMIC_VAR_INIT(5);
- ```
-
- — *end example*]

 ``` cpp
 static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
 ```

 The `static` data member `is_always_lock_free` is `true` if the atomic
 type’s operations are always lock-free, and `false` otherwise.

- [*Note 4*: The value of `is_always_lock_free` is consistent with the
 value of the corresponding `ATOMIC_..._LOCK_FREE` macro, if
 defined. — *end note*]

 ``` cpp
 bool is_lock_free() const volatile noexcept;
@@ -565,63 +1228,79 @@ bool is_lock_free() const noexcept;
 ```

 *Returns:* `true` if the object’s operations are lock-free, `false`
 otherwise.

- [*Note 5*: The return value of the `is_lock_free` member function is
 consistent with the value of `is_always_lock_free` for the same
 type. — *end note*]

 ``` cpp
- void store(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
- void store(T desired, memory_order order = memory_order_seq_cst) noexcept;
 ```

- *Requires:* The `order` argument shall not be `memory_order_consume`,
- `memory_order_acquire`, nor `memory_order_acq_rel`.

 *Effects:* Atomically replaces the value pointed to by `this` with the
 value of `desired`. Memory is affected according to the value of
 `order`.

 ``` cpp
 T operator=(T desired) volatile noexcept;
 T operator=(T desired) noexcept;
 ```

- *Effects:* Equivalent to: `store(desired)`.

 *Returns:* `desired`.

 ``` cpp
- T load(memory_order order = memory_order_seq_cst) const volatile noexcept;
- T load(memory_order order = memory_order_seq_cst) const noexcept;
 ```

- *Requires:* The `order` argument shall not be `memory_order_release` nor
- `memory_order_acq_rel`.

 *Effects:* Memory is affected according to the value of `order`.

 *Returns:* Atomically returns the value pointed to by `this`.

 ``` cpp
 operator T() const volatile noexcept;
 operator T() const noexcept;
 ```

 *Effects:* Equivalent to: `return load();`

 ``` cpp
- T exchange(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
- T exchange(T desired, memory_order order = memory_order_seq_cst) noexcept;
 ```

 *Effects:* Atomically replaces the value pointed to by `this` with
 `desired`. Memory is affected according to the value of `order`. These
 operations are atomic read-modify-write
- operations ([[intro.multithread]]).

 *Returns:* Atomically returns the value pointed to by `this` immediately
 before the effects.

 ``` cpp
@@ -632,57 +1311,60 @@ bool compare_exchange_weak(T& expected, T desired,
 bool compare_exchange_strong(T& expected, T desired,
 memory_order success, memory_order failure) volatile noexcept;
 bool compare_exchange_strong(T& expected, T desired,
 memory_order success, memory_order failure) noexcept;
 bool compare_exchange_weak(T& expected, T desired,
- memory_order order = memory_order_seq_cst) volatile noexcept;
 bool compare_exchange_weak(T& expected, T desired,
- memory_order order = memory_order_seq_cst) noexcept;
 bool compare_exchange_strong(T& expected, T desired,
- memory_order order = memory_order_seq_cst) volatile noexcept;
 bool compare_exchange_strong(T& expected, T desired,
- memory_order order = memory_order_seq_cst) noexcept;
 ```

- *Requires:* The `failure` argument shall not be `memory_order_release`
- nor `memory_order_acq_rel`.

 *Effects:* Retrieves the value in `expected`. It then atomically
- compares the contents of the memory pointed to by `this` for equality
- with that previously retrieved from `expected`, and if true, replaces
- the contents of the memory pointed to by `this` with that in `desired`.
- If and only if the comparison is true, memory is affected according to
- the value of `success`, and if the comparison is false, memory is
- affected according to the value of `failure`. When only one
- `memory_order` argument is supplied, the value of `success` is `order`,
- and the value of `failure` is `order` except that a value of
- `memory_order_acq_rel` shall be replaced by the value
- `memory_order_acquire` and a value of `memory_order_release` shall be
- replaced by the value `memory_order_relaxed`. If and only if the
- comparison is false then, after the atomic operation, the contents of
- the memory in `expected` are replaced by the value read from the memory
- pointed to by `this` during the atomic comparison. If the operation
- returns `true`, these operations are atomic read-modify-write
- operations ([[intro.multithread]]) on the memory pointed to by `this`.
 Otherwise, these operations are atomic load operations on that memory.

 *Returns:* The result of the comparison.

- [*Note 6*:

- For example, the effect of `compare_exchange_strong` is

 ``` cpp
 if (memcmp(this, &expected, sizeof(*this)) == 0)
 memcpy(this, &desired, sizeof(*this));
 else
 memcpy(expected, this, sizeof(*this));
 ```

 — *end note*]

- [*Example 2*:

 The expected use of the compare-and-exchange operations is as follows.
 The compare-and-exchange operations will update `expected` when another
 iteration of the loop is needed.

@@ -693,16 +1375,16 @@ do {
 } while (!current.compare_exchange_weak(expected, desired));
 ```

 — *end example*]

- [*Example 3*:

 Because the expected value is updated only on failure, code releasing
- the memory containing the `expected` value on success will work. E.g.
- list head insertion will act atomically and would not introduce a data
- race in the following code:

 ``` cpp
 do {
 p->next = head; // make new list node point to the current head
 } while (!head.compare_exchange_weak(p->next, p)); // try to insert
@@ -718,90 +1400,185 @@ the atomic object.
 *Remarks:* A weak compare-and-exchange operation may fail spuriously.
 That is, even when the contents of memory referred to by `expected` and
 `this` are equal, it may return `false` and store back to `expected` the
 same memory contents that were originally there.

- [*Note 7*: This spurious failure enables implementation of
 compare-and-exchange on a broader class of machines, e.g., load-locked
 store-conditional machines. A consequence of spurious failure is that
 nearly all uses of weak compare-and-exchange will be in a loop. When a
 compare-and-exchange is in a loop, the weak version will yield better
 performance on some platforms. When a weak compare-and-exchange would
 require a loop and a strong one would not, the strong one is
 preferable. — *end note*]

- [*Note 8*: The `memcpy` and `memcmp` semantics of the
- compare-and-exchange operations may result in failed comparisons for
- values that compare equal with `operator==` if the underlying type has
- padding bits, trap bits, or alternate representations of the same value.
- Thus, `compare_exchange_strong` should be used with extreme care. On the
- other hand, `compare_exchange_weak` should converge
- rapidly. *end note*]
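
*Editorial aside, not part of the diffed text:* the "update `expected` on failure" behavior is what makes the canonical CAS loop so compact. A sketch of a read-modify-write operation the library does not provide directly:

``` cpp
#include <atomic>

// fetch_multiply built from compare_exchange_weak. On failure (spurious or
// real), 'expected' is refreshed with the current value and the loop retries.
int fetch_multiply(std::atomic<int>& a, int factor) {
  int expected = a.load();
  while (!a.compare_exchange_weak(expected, expected * factor)) {
    // 'expected' now holds the freshly observed value; just try again
  }
  return expected;  // the value immediately before the successful update
}
```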
 ### Specializations for integers <a id="atomics.types.int">[[atomics.types.int]]</a>

- There are specializations of the `atomic` template for the integral
- types `char`, `signed char`, `unsigned char`, `short`, `unsigned short`,
- `int`, `unsigned int`, `long`, `unsigned long`, `long long`,
- `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`, and any other
- types needed by the typedefs in the header `<cstdint>`. For each such
- integral type `integral`, the specialization `atomic<integral>` provides
- additional atomic operations appropriate to integral types.

- [*Note 1*: For the specialization `atomic<bool>`, see
 [[atomics.types.generic]]. — *end note*]

 ``` cpp
 namespace std {
 template<> struct atomic<integral> {
 using value_type = integral;
 using difference_type = value_type;
 static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
 bool is_lock_free() const volatile noexcept;
 bool is_lock_free() const noexcept;
- void store(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- void store(integral, memory_order = memory_order_seq_cst) noexcept;
- integral load(memory_order = memory_order_seq_cst) const volatile noexcept;
- integral load(memory_order = memory_order_seq_cst) const noexcept;
- operator integral() const volatile noexcept;
- operator integral() const noexcept;
- integral exchange(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral exchange(integral, memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_weak(integral&, integral,
- memory_order, memory_order) volatile noexcept;
- bool compare_exchange_weak(integral&, integral,
- memory_order, memory_order) noexcept;
- bool compare_exchange_strong(integral&, integral,
- memory_order, memory_order) volatile noexcept;
- bool compare_exchange_strong(integral&, integral,
- memory_order, memory_order) noexcept;
- bool compare_exchange_weak(integral&, integral,
- memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_weak(integral&, integral,
- memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_strong(integral&, integral,
- memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_strong(integral&, integral,
- memory_order = memory_order_seq_cst) noexcept;
- integral fetch_add(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral fetch_add(integral, memory_order = memory_order_seq_cst) noexcept;
- integral fetch_sub(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral fetch_sub(integral, memory_order = memory_order_seq_cst) noexcept;
- integral fetch_and(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral fetch_and(integral, memory_order = memory_order_seq_cst) noexcept;
- integral fetch_or(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral fetch_or(integral, memory_order = memory_order_seq_cst) noexcept;
- integral fetch_xor(integral, memory_order = memory_order_seq_cst) volatile noexcept;
- integral fetch_xor(integral, memory_order = memory_order_seq_cst) noexcept;

- atomic() noexcept = default;
 constexpr atomic(integral) noexcept;
 atomic(const atomic&) = delete;
 atomic& operator=(const atomic&) = delete;
 atomic& operator=(const atomic&) volatile = delete;
 integral operator=(integral) volatile noexcept;
 integral operator=(integral) noexcept;

 integral operator++(int) volatile noexcept;
 integral operator++(int) noexcept;
 integral operator--(int) volatile noexcept;
 integral operator--(int) noexcept;
@@ -817,94 +1594,256 @@ namespace std {
 integral operator&=(integral) noexcept;
 integral operator|=(integral) volatile noexcept;
 integral operator|=(integral) noexcept;
 integral operator^=(integral) volatile noexcept;
 integral operator^=(integral) noexcept;
 };
 }
 ```

 The atomic integral specializations are standard-layout structs. They
- each have a trivial default constructor and a trivial destructor.

 Descriptions are provided below only for members that differ from the
 primary template.

 The following operations perform arithmetic computations. The key,
 operator, and computation correspondence is:

- **Table: Atomic arithmetic computations** <a id="tab:atomic.arithmetic.computations">[tab:atomic.arithmetic.computations]</a>

 | Key | Op | Computation | Key | Op | Computation |
 | ----- | --- | -------------------- | ----- | --- | -------------------- |
 | `add` | `+` | addition | `sub` | `-` | subtraction |
 | `or` | `|` | bitwise inclusive or | `xor` | `^` | bitwise exclusive or |
 | `and` | `&` | bitwise and | | | |

 ``` cpp
- T fetch_key(T operand, memory_order order = memory_order_seq_cst) volatile noexcept;
- T fetch_key(T operand, memory_order order = memory_order_seq_cst) noexcept;
 ```

 *Effects:* Atomically replaces the value pointed to by `this` with the
 result of the computation applied to the value pointed to by `this` and
 the given `operand`. Memory is affected according to the value of
 `order`. These operations are atomic read-modify-write
- operations ([[intro.multithread]]).

 *Returns:* Atomically, the value pointed to by `this` immediately before
 the effects.

- *Remarks:* For signed integer types, arithmetic is defined to use two’s
- complement representation. There are no undefined results.

 ``` cpp
 T operator op=(T operand) volatile noexcept;
 T operator op=(T operand) noexcept;
 ```

 *Effects:* Equivalent to:
 `return fetch_`*`key`*`(operand) `*`op`*` operand;`
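
*Editorial aside, not part of the diffed text:* a small sketch of the integer `fetch_`*`key`* family and how its return value differs from the compound operators.

``` cpp
#include <atomic>

std::atomic<unsigned> flags{0};

void set_bit(unsigned bit)  { flags.fetch_or(1u << bit); }      // like |=
void drop_bit(unsigned bit) { flags.fetch_and(~(1u << bit)); }  // like &= ~mask
bool has_bit(unsigned bit)  { return (flags.load() >> bit) & 1u; }

// fetch_or returns the value *before* the update; the compound operator
// returns the value *after* it, per the equivalence above:
//   unsigned before = flags.fetch_or(mask);
//   unsigned after  = (flags |= mask);  // == fetch_or(mask) | mask
```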
 ### Partial specialization for pointers <a id="atomics.types.pointer">[[atomics.types.pointer]]</a>

 ``` cpp
 namespace std {
 template<class T> struct atomic<T*> {
 using value_type = T*;
 using difference_type = ptrdiff_t;
 static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
 bool is_lock_free() const volatile noexcept;
 bool is_lock_free() const noexcept;
- void store(T*, memory_order = memory_order_seq_cst) volatile noexcept;
- void store(T*, memory_order = memory_order_seq_cst) noexcept;
- T* load(memory_order = memory_order_seq_cst) const volatile noexcept;
- T* load(memory_order = memory_order_seq_cst) const noexcept;
 operator T*() const volatile noexcept;
 operator T*() const noexcept;
- T* exchange(T*, memory_order = memory_order_seq_cst) volatile noexcept;
- T* exchange(T*, memory_order = memory_order_seq_cst) noexcept;
 bool compare_exchange_weak(T*&, T*, memory_order, memory_order) volatile noexcept;
 bool compare_exchange_weak(T*&, T*, memory_order, memory_order) noexcept;
 bool compare_exchange_strong(T*&, T*, memory_order, memory_order) volatile noexcept;
 bool compare_exchange_strong(T*&, T*, memory_order, memory_order) noexcept;
- bool compare_exchange_weak(T*&, T*, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_weak(T*&, T*, memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_strong(T*&, T*, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_strong(T*&, T*, memory_order = memory_order_seq_cst) noexcept;
- T* fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst) volatile noexcept;
- T* fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst) noexcept;
- T* fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst) volatile noexcept;
- T* fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst) noexcept;

- atomic() noexcept = default;
- constexpr atomic(T*) noexcept;
- atomic(const atomic&) = delete;
- atomic& operator=(const atomic&) = delete;
- atomic& operator=(const atomic&) volatile = delete;
- T* operator=(T*) volatile noexcept;
- T* operator=(T*) noexcept;

 T* operator++(int) volatile noexcept;
 T* operator++(int) noexcept;
 T* operator--(int) volatile noexcept;
 T* operator--(int) noexcept;
@@ -914,47 +1853,55 @@ namespace std {
 T* operator--() noexcept;
 T* operator+=(ptrdiff_t) volatile noexcept;
 T* operator+=(ptrdiff_t) noexcept;
 T* operator-=(ptrdiff_t) volatile noexcept;
 T* operator-=(ptrdiff_t) noexcept;
 };
 }
 ```

 There is a partial specialization of the `atomic` class template for
 pointers. Specializations of this partial specialization are
- standard-layout structs. They each have a trivial default constructor
- and a trivial destructor.

 Descriptions are provided below only for members that differ from the
 primary template.

 The following operations perform pointer arithmetic. The key, operator,
 and computation correspondence is:

- **Table: Atomic pointer computations** <a id="tab:atomic.pointer.computations">[tab:atomic.pointer.computations]</a>

 | Key | Op | Computation | Key | Op | Computation |
 | ----- | --- | -------- | ----- | --- | ----------- |
 | `add` | `+` | addition | `sub` | `-` | subtraction |

 ``` cpp
- T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) volatile noexcept;
- T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) noexcept;
 ```

- *Requires:* `T` shall be an object type, otherwise the program is
- ill-formed.

 [*Note 1*: Pointer arithmetic on `void*` or function pointers is
 ill-formed. — *end note*]

 *Effects:* Atomically replaces the value pointed to by `this` with the
 result of the computation applied to the value pointed to by `this` and
 the given `operand`. Memory is affected according to the value of
 `order`. These operations are atomic read-modify-write
- operations ([[intro.multithread]]).

 *Returns:* Atomically, the value pointed to by `this` immediately before
 the effects.

 *Remarks:* The result may be an undefined address, but the operations
@@ -963,43 +1910,526 @@ otherwise have no undefined behavior.
 ``` cpp
 T* operator op=(ptrdiff_t operand) volatile noexcept;
 T* operator op=(ptrdiff_t operand) noexcept;
 ```

 *Effects:* Equivalent to:
 `return fetch_`*`key`*`(operand) `*`op`*` operand;`
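
*Editorial aside, not part of the diffed text:* pointer `fetch_add` counts in elements, not bytes, and returns the pre-update pointer. A small bump-allocation-style sketch (names illustrative):

``` cpp
#include <atomic>

int buffer[128];
std::atomic<int*> cursor{buffer};

// Each caller atomically claims a distinct 16-int chunk; fetch_add returns
// the old pointer, i.e. the start of the chunk just reserved. Callers must
// stop before running off the end of 'buffer'.
int* claim_chunk() {
  return cursor.fetch_add(16);  // pointer arithmetic in units of int
}
```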

 ### Member operators common to integers and pointers to objects <a id="atomics.types.memop">[[atomics.types.memop]]</a>

 ``` cpp
- T operator++(int) volatile noexcept;
- T operator++(int) noexcept;
 ```

 *Effects:* Equivalent to: `return fetch_add(1);`

 ``` cpp
- T operator--(int) volatile noexcept;
- T operator--(int) noexcept;
 ```

 *Effects:* Equivalent to: `return fetch_sub(1);`

 ``` cpp
- T operator++() volatile noexcept;
- T operator++() noexcept;
 ```

 *Effects:* Equivalent to: `return fetch_add(1) + 1;`

 ``` cpp
- T operator--() volatile noexcept;
- T operator--() noexcept;
 ```

 *Effects:* Equivalent to: `return fetch_sub(1) - 1;`
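
*Editorial aside, not part of the diffed text:* the post/pre distinction mirrors plain integers, just expressed via `fetch_add`/`fetch_sub`.

``` cpp
#include <atomic>

std::atomic<int> counter{0};

void tick() {
  int before = counter++;  // equivalent to counter.fetch_add(1)
  int after  = ++counter;  // equivalent to counter.fetch_add(1) + 1
  (void)before;
  (void)after;
}
```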
 ## Non-member functions <a id="atomics.nonmembers">[[atomics.nonmembers]]</a>

 A non-member function template whose name matches the pattern `atomic_f`
 or the pattern `atomic_f_explicit` invokes the member function `f`, with
 the value of the first parameter as the object expression and the values
@@ -1007,56 +2437,38 @@ of the remaining parameters (if any) as the arguments of the member
 function call, in order. An argument for a parameter of type
 `atomic<T>::value_type*` is dereferenced when passed to the member
 function call. If no such member function exists, the program is
 ill-formed.

- ``` cpp
- template<class T>
- void atomic_init(volatile atomic<T>* object, typename atomic<T>::value_type desired) noexcept;
- template<class T>
- void atomic_init(atomic<T>* object, typename atomic<T>::value_type desired) noexcept;
- ```
-
- *Effects:* Non-atomically initializes `*object` with value `desired`.
- This function shall only be applied to objects that have been default
- constructed, and then only once.
-
- [*Note 1*: These semantics ensure compatibility with C. — *end note*]
-
- [*Note 2*: Concurrent access from another thread, even via an atomic
- operation, constitutes a data race. — *end note*]
-
 [*Note 1*: The non-member functions enable programmers to write code
 that can be compiled as either C or C++, for example in a shared header
 file. — *end note*]
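
*Editorial aside, not part of the diffed text:* the `atomic_f`/`atomic_f_explicit` naming pattern maps directly onto member calls, e.g.:

``` cpp
#include <atomic>

std::atomic<int> a{0};

void demo() {
  // Non-member spelling (the form shared with C's <stdatomic.h>)...
  atomic_store_explicit(&a, 1, std::memory_order_release);
  int v = atomic_load_explicit(&a, std::memory_order_acquire);
  // ...which invokes the corresponding member functions:
  a.store(1, std::memory_order_release);
  v = a.load(std::memory_order_acquire);
  (void)v;
}
```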

 ## Flag type and operations <a id="atomics.flag">[[atomics.flag]]</a>

 ``` cpp
 namespace std {
 struct atomic_flag {
- bool test_and_set(memory_order = memory_order_seq_cst) volatile noexcept;
- bool test_and_set(memory_order = memory_order_seq_cst) noexcept;
- void clear(memory_order = memory_order_seq_cst) volatile noexcept;
- void clear(memory_order = memory_order_seq_cst) noexcept;
-
- atomic_flag() noexcept = default;
 atomic_flag(const atomic_flag&) = delete;
 atomic_flag& operator=(const atomic_flag&) = delete;
 atomic_flag& operator=(const atomic_flag&) volatile = delete;
 };
-
- bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
- bool atomic_flag_test_and_set(atomic_flag*) noexcept;
- bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
- bool atomic_flag_test_and_set_explicit(atomic_flag*, memory_order) noexcept;
- void atomic_flag_clear(volatile atomic_flag*) noexcept;
- void atomic_flag_clear(atomic_flag*) noexcept;
- void atomic_flag_clear_explicit(volatile atomic_flag*, memory_order) noexcept;
- void atomic_flag_clear_explicit(atomic_flag*, memory_order) noexcept;
-
- #define ATOMIC_FLAG_INIT see below
 }
 ```

 The `atomic_flag` type provides the classic test-and-set functionality.
 It has two states, set and clear.
@@ -1065,99 +2477,170 @@ Operations on an object of type `atomic_flag` shall be lock-free.

 [*Note 1*: Hence the operations should also be
 address-free. — *end note*]

 The `atomic_flag` type is a standard-layout struct. It has a trivial
- default constructor and a trivial destructor.

- The macro `ATOMIC_FLAG_INIT` shall be defined in such a way that it can
- be used to initialize an object of type `atomic_flag` to the clear
- state. The macro can be used in the form:

 ``` cpp
- atomic_flag guard = ATOMIC_FLAG_INIT;
 ```

- It is unspecified whether the macro can be used in other initialization
- contexts. For a complete static-duration object, that initialization
- shall be static. Unless initialized with `ATOMIC_FLAG_INIT`, it is
- unspecified whether an `atomic_flag` object has an initial state of set
- or clear.

 ``` cpp
 bool atomic_flag_test_and_set(volatile atomic_flag* object) noexcept;
 bool atomic_flag_test_and_set(atomic_flag* object) noexcept;
 bool atomic_flag_test_and_set_explicit(volatile atomic_flag* object, memory_order order) noexcept;
 bool atomic_flag_test_and_set_explicit(atomic_flag* object, memory_order order) noexcept;
- bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) volatile noexcept;
- bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) noexcept;
 ```

 *Effects:* Atomically sets the value pointed to by `object` or by `this`
 to `true`. Memory is affected according to the value of `order`. These
 operations are atomic read-modify-write
- operations ([[intro.multithread]]).

 *Returns:* Atomically, the value of the object immediately before the
 effects.

 ``` cpp
 void atomic_flag_clear(volatile atomic_flag* object) noexcept;
 void atomic_flag_clear(atomic_flag* object) noexcept;
 void atomic_flag_clear_explicit(volatile atomic_flag* object, memory_order order) noexcept;
 void atomic_flag_clear_explicit(atomic_flag* object, memory_order order) noexcept;
- void atomic_flag::clear(memory_order order = memory_order_seq_cst) volatile noexcept;
- void atomic_flag::clear(memory_order order = memory_order_seq_cst) noexcept;
 ```

- *Requires:* The `order` argument shall not be `memory_order_consume`,
- `memory_order_acquire`, nor `memory_order_acq_rel`.

 *Effects:* Atomically sets the value pointed to by `object` or by `this`
 to `false`. Memory is affected according to the value of `order`.
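
*Editorial aside, not part of the diffed text:* the classic use of `atomic_flag` is a spinlock, using the guaranteed-lock-free test-and-set/clear pair with acquire/release ordering (both orders permitted by the requirements above).

``` cpp
#include <atomic>

std::atomic_flag guard = ATOMIC_FLAG_INIT;  // the one guaranteed init form (clear)

void lock() {
  // test_and_set returns the previous state; loop until it was 'clear'
  while (guard.test_and_set(std::memory_order_acquire)) {
    // spin
  }
}

void unlock() {
  guard.clear(std::memory_order_release);  // release is a permitted order here
}
```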
 ## Fences <a id="atomics.fences">[[atomics.fences]]</a>

- This section introduces synchronization primitives called *fences*.
 Fences can have acquire semantics, release semantics, or both. A fence
 with acquire semantics is called an *acquire fence*. A fence with
 release semantics is called a *release fence*.

- A release fence *A* synchronizes with an acquire fence *B* if there
- exist atomic operations *X* and *Y*, both operating on some atomic
- object *M*, such that *A* is sequenced before *X*, *X* modifies *M*, *Y*
- is sequenced before *B*, and *Y* reads the value written by *X* or a
- value written by any side effect in the hypothetical release sequence
- *X* would head if it were a release operation.

- A release fence *A* synchronizes with an atomic operation *B* that
- performs an acquire operation on an atomic object *M* if there exists an
- atomic operation *X* such that *A* is sequenced before *X*, *X* modifies
- *M*, and *B* reads the value written by *X* or a value written by any
- side effect in the hypothetical release sequence *X* would head if it
- were a release operation.

- An atomic operation *A* that is a release operation on an atomic object
- *M* synchronizes with an acquire fence *B* if there exists some atomic
- operation *X* on *M* such that *X* is sequenced before *B* and reads the
- value written by *A* or a value written by any side effect in the
- release sequence headed by *A*.

 ``` cpp
 extern "C" void atomic_thread_fence(memory_order order) noexcept;
 ```

 *Effects:* Depending on the value of `order`, this operation:

- - has no effects, if `order == memory_order_relaxed`;
- - is an acquire fence, if
- `order == memory_order_acquire || order == memory_order_consume`;
- - is a release fence, if `order == memory_order_release`;
 - is both an acquire fence and a release fence, if
- `order == memory_order_acq_rel`;
 - is a sequentially consistent acquire and release fence, if
- `order == memory_order_seq_cst`.

 ``` cpp
 extern "C" void atomic_signal_fence(memory_order order) noexcept;
 ```

@@ -1171,22 +2654,45 @@ handler. Compiler optimizations and reorderings of loads and stores are
 inhibited in the same way as with `atomic_thread_fence`, but the
 hardware fence instructions that `atomic_thread_fence` would have
 inserted are not emitted. — *end note*]
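
*Editorial aside, not part of the diffed text:* the fence-to-fence rule above, instantiated with relaxed atomics (the *A*, *X*, *Y*, *B* labels from the text appear as comments):

``` cpp
#include <atomic>

std::atomic<bool> flag{false};
int data = 0;  // plain data protected by the fence pairing

void producer() {
  data = 42;
  std::atomic_thread_fence(std::memory_order_release);  // release fence A
  flag.store(true, std::memory_order_relaxed);          // X: modifies M
}

void consumer() {
  while (!flag.load(std::memory_order_relaxed))         // Y: reads X's value
    ;
  std::atomic_thread_fence(std::memory_order_acquire);  // acquire fence B
  int r = data;  // A synchronizes with B, so the write to 'data' is visible
  (void)r;
}
```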

 <!-- Link reference definitions -->

 [atomics]: #atomics
 [atomics.alias]: #atomics.alias
 [atomics.fences]: #atomics.fences
 [atomics.flag]: #atomics.flag
 [atomics.general]: #atomics.general
 [atomics.lockfree]: #atomics.lockfree
 [atomics.nonmembers]: #atomics.nonmembers
 [atomics.order]: #atomics.order
 [atomics.syn]: #atomics.syn
 [atomics.types.generic]: #atomics.types.generic
 [atomics.types.int]: #atomics.types.int
 [atomics.types.memop]: #atomics.types.memop
 [atomics.types.operations]: #atomics.types.operations
 [atomics.types.pointer]: #atomics.types.pointer
 [basic.types]: basic.md#basic.types
- [intro.multithread]: intro.md#intro.multithread
- [intro.progress]: intro.md#intro.progress

*(The rendering now repeats the same hunks from the right-hand, post-change side of the flattened side-by-side diff.)*

 This Clause describes components for fine-grained atomic access. This
 access is provided via operations on atomic objects.

 The following subclauses describe atomics requirements and components
+ for types and operations, as summarized in [[atomics.summary]].

+ **Table: Atomics library summary** <a id="atomics.summary">[atomics.summary]</a>

 | Subclause | | Header |
+ | ------------------------- | --------------------------- | ---------- |
+ | [[atomics.alias]] | Type aliases | `<atomic>` |
+ | [[atomics.order]] | Order and consistency | |
+ | [[atomics.lockfree]] | Lock-free property | |
+ | [[atomics.wait]] | Waiting and notifying | |
+ | [[atomics.ref.generic]] | Class template `atomic_ref` | |
+ | [[atomics.types.generic]] | Class template `atomic` | |
+ | [[atomics.nonmembers]] | Non-member functions | |
+ | [[atomics.flag]] | Flag type and operations | |
 | [[atomics.fences]] | Fences | |


 ## Header `<atomic>` synopsis <a id="atomics.syn">[[atomics.syn]]</a>

 ``` cpp
 namespace std {
 // [atomics.order], order and consistency
+ enum class memory_order : unspecified;
 template<class T>
 T kill_dependency(T y) noexcept;

 // [atomics.lockfree], lock-free property
 #define ATOMIC_BOOL_LOCK_FREE unspecified
 #define ATOMIC_CHAR_LOCK_FREE unspecified
+ #define ATOMIC_CHAR8_T_LOCK_FREE unspecified
 #define ATOMIC_CHAR16_T_LOCK_FREE unspecified
 #define ATOMIC_CHAR32_T_LOCK_FREE unspecified
 #define ATOMIC_WCHAR_T_LOCK_FREE unspecified
 #define ATOMIC_SHORT_LOCK_FREE unspecified
 #define ATOMIC_INT_LOCK_FREE unspecified
 #define ATOMIC_LONG_LOCK_FREE unspecified
 #define ATOMIC_LLONG_LOCK_FREE unspecified
 #define ATOMIC_POINTER_LOCK_FREE unspecified

+ // [atomics.ref.generic], class template atomic_ref
+ template<class T> struct atomic_ref;
+ // [atomics.ref.pointer], partial specialization for pointers
+ template<class T> struct atomic_ref<T*>;
+
+ // [atomics.types.generic], class template atomic
 template<class T> struct atomic;
 // [atomics.types.pointer], partial specialization for pointers
 template<class T> struct atomic<T*>;

 // [atomics.nonmembers], non-member functions
 template<class T>
 bool atomic_is_lock_free(const volatile atomic<T>*) noexcept;
 template<class T>
 bool atomic_is_lock_free(const atomic<T>*) noexcept;
 template<class T>
 void atomic_store(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 void atomic_store(atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>

 template<class T>
 T atomic_load_explicit(const volatile atomic<T>*, memory_order) noexcept;
 template<class T>
 T atomic_load_explicit(const atomic<T>*, memory_order) noexcept;
 template<class T>
+ T atomic_exchange(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 T atomic_exchange(atomic<T>*, typename atomic<T>::value_type) noexcept;
 template<class T>
 T atomic_exchange_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
 memory_order) noexcept;

 memory_order) noexcept;
 template<class T>
 T atomic_fetch_xor_explicit(atomic<T>*, typename atomic<T>::value_type,
 memory_order) noexcept;

+ template<class T>
+ void atomic_wait(const volatile atomic<T>*, typename atomic<T>::value_type);
+ template<class T>
+ void atomic_wait(const atomic<T>*, typename atomic<T>::value_type);
+ template<class T>
+ void atomic_wait_explicit(const volatile atomic<T>*, typename atomic<T>::value_type,
+ memory_order);
+ template<class T>
+ void atomic_wait_explicit(const atomic<T>*, typename atomic<T>::value_type,
+ memory_order);
+ template<class T>
+ void atomic_notify_one(volatile atomic<T>*);
+ template<class T>
+ void atomic_notify_one(atomic<T>*);
+ template<class T>
+ void atomic_notify_all(volatile atomic<T>*);
+ template<class T>
+ void atomic_notify_all(atomic<T>*);

 // [atomics.alias], type aliases
 using atomic_bool = atomic<bool>;
 using atomic_char = atomic<char>;
 using atomic_schar = atomic<signed char>;

 using atomic_uint = atomic<unsigned int>;
 using atomic_long = atomic<long>;
 using atomic_ulong = atomic<unsigned long>;
 using atomic_llong = atomic<long long>;
 using atomic_ullong = atomic<unsigned long long>;
+ using atomic_char8_t = atomic<char8_t>;
 using atomic_char16_t = atomic<char16_t>;
 using atomic_char32_t = atomic<char32_t>;
 using atomic_wchar_t = atomic<wchar_t>;

 using atomic_int8_t = atomic<int8_t>;

 using atomic_size_t = atomic<size_t>;
 using atomic_ptrdiff_t = atomic<ptrdiff_t>;
 using atomic_intmax_t = atomic<intmax_t>;
 using atomic_uintmax_t = atomic<uintmax_t>;

+ using atomic_signed_lock_free = see below;
+ using atomic_unsigned_lock_free = see below;
+
 // [atomics.flag], flag type and operations
 struct atomic_flag;
+
+ bool atomic_flag_test(const volatile atomic_flag*) noexcept;
+ bool atomic_flag_test(const atomic_flag*) noexcept;
+ bool atomic_flag_test_explicit(const volatile atomic_flag*, memory_order) noexcept;
+ bool atomic_flag_test_explicit(const atomic_flag*, memory_order) noexcept;
 bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
 bool atomic_flag_test_and_set(atomic_flag*) noexcept;
 bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
 bool atomic_flag_test_and_set_explicit(atomic_flag*, memory_order) noexcept;
 void atomic_flag_clear(volatile atomic_flag*) noexcept;
 void atomic_flag_clear(atomic_flag*) noexcept;
 void atomic_flag_clear_explicit(volatile atomic_flag*, memory_order) noexcept;
 void atomic_flag_clear_explicit(atomic_flag*, memory_order) noexcept;
+
+ void atomic_flag_wait(const volatile atomic_flag*, bool) noexcept;
+ void atomic_flag_wait(const atomic_flag*, bool) noexcept;
+ void atomic_flag_wait_explicit(const volatile atomic_flag*,
+ bool, memory_order) noexcept;
+ void atomic_flag_wait_explicit(const atomic_flag*,
+ bool, memory_order) noexcept;
+ void atomic_flag_notify_one(volatile atomic_flag*) noexcept;
+ void atomic_flag_notify_one(atomic_flag*) noexcept;
+ void atomic_flag_notify_all(volatile atomic_flag*) noexcept;
+ void atomic_flag_notify_all(atomic_flag*) noexcept;

 // [atomics.fences], fences
 extern "C" void atomic_thread_fence(memory_order) noexcept;
 extern "C" void atomic_signal_fence(memory_order) noexcept;
 }

 The type aliases `atomic_intN_t`, `atomic_uintN_t`, `atomic_intptr_t`,
 and `atomic_uintptr_t` are defined if and only if `intN_t`, `uintN_t`,
 `intptr_t`, and `uintptr_t` are defined, respectively.

+ The type aliases `atomic_signed_lock_free` and
+ `atomic_unsigned_lock_free` name specializations of `atomic` whose
+ template arguments are integral types, respectively signed and unsigned,
+ and whose `is_always_lock_free` property is `true`.
+
+ [*Note 1*: These aliases are optional in freestanding implementations
+ [[compliance]]. — *end note*]
+
+ Implementations should choose for these aliases the integral
+ specializations of `atomic` for which the atomic waiting and notifying
+ operations [[atomics.wait]] are most efficient.
303
+
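The following non-normative sketch (an editorial illustration, not part of the synopsis above) shows how a program might use these aliases; it assumes a hosted C++20 implementation, where the aliases are required to be present:

``` cpp
#include <atomic>

// A counter using the alias recommended above for efficient waiting and notifying.
std::atomic_unsigned_lock_free ticket{0};

// Guaranteed by the definition of the alias.
static_assert(decltype(ticket)::is_always_lock_free);
```
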
304
  ## Order and consistency <a id="atomics.order">[[atomics.order]]</a>
305
 
306
  ``` cpp
307
  namespace std {
308
+ enum class memory_order : unspecified {
309
+ relaxed, consume, acquire, release, acq_rel, seq_cst
310
  };
311
+ inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
312
+ inline constexpr memory_order memory_order_consume = memory_order::consume;
313
+ inline constexpr memory_order memory_order_acquire = memory_order::acquire;
314
+ inline constexpr memory_order memory_order_release = memory_order::release;
315
+ inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
316
+ inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
317
  }
318
  ```
319
 
320
  The enumeration `memory_order` specifies the detailed regular
321
  (non-atomic) memory synchronization order as defined in
322
  [[intro.multithread]] and may provide for operation ordering. Its
323
  enumerated values and their meanings are as follows:
324
 
325
+ - `memory_order::relaxed`: no operation orders memory.
326
+ - `memory_order::release`, `memory_order::acq_rel`, and
327
+ `memory_order::seq_cst`: a store operation performs a release
328
+ operation on the affected memory location.
329
+ - `memory_order::consume`: a load operation performs a consume operation
330
  on the affected memory location. [*Note 1*: Prefer
331
+ `memory_order::acquire`, which provides stronger guarantees than
332
+ `memory_order::consume`. Implementations have found it infeasible to
333
+ provide performance better than that of `memory_order::acquire`.
334
  Specification revisions are under consideration. — *end note*]
335
+ - `memory_order::acquire`, `memory_order::acq_rel`, and
336
+ `memory_order::seq_cst`: a load operation performs an acquire
337
+ operation on the affected memory location.
338
 
339
+ [*Note 2*: Atomic operations specifying `memory_order::relaxed` are
340
  relaxed with respect to memory ordering. Implementations must still
341
  guarantee that any given atomic access to a particular atomic object be
342
  indivisible with respect to all other atomic accesses to that
343
  object. — *end note*]
344
 
345
+ An atomic operation A that performs a release operation on an atomic
346
+ object M synchronizes with an atomic operation B that performs an
347
+ acquire operation on M and takes its value from any side effect in the
348
+ release sequence headed by A.
349
+
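A non-normative sketch (an editorial illustration) of the synchronizes-with rule above: the acquire load that takes its value from the release store makes the preceding non-atomic write visible.

``` cpp
#include <atomic>
#include <cassert>
#include <thread>

int data = 0;                     // ordinary, non-atomic object
std::atomic<bool> ready{false};

void producer() {
  data = 42;                                      // happens before the release store
  ready.store(true, std::memory_order::release);  // A: release operation on ready
}

void consumer() {
  while (!ready.load(std::memory_order::acquire)) // B: acquire operation on ready
    ;                                             // spin until B reads A's value
  assert(data == 42);  // A synchronizes with B, so the write to data is visible
}

int main() {
  std::thread t1(producer), t2(consumer);
  t1.join();
  t2.join();
}
```
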
350
+ An atomic operation A on some atomic object M is *coherence-ordered
351
+ before* another atomic operation B on M if
352
+
353
+ - A is a modification, and B reads the value stored by A, or
354
+ - A precedes B in the modification order of M, or
355
+ - A and B are not the same atomic read-modify-write operation, and there
356
+ exists an atomic modification X of M such that A reads the value
357
+ stored by X and X precedes B in the modification order of M, or
358
+ - there exists an atomic modification X of M such that A is
359
+ coherence-ordered before X and X is coherence-ordered before B.
360
+
361
+ There is a single total order S on all `memory_order::seq_cst`
362
+ operations, including fences, that satisfies the following constraints.
363
+ First, if A and B are `memory_order::seq_cst` operations and A strongly
364
+ happens before B, then A precedes B in S. Second, for every pair of
365
+ atomic operations A and B on an object M, where A is coherence-ordered
366
+ before B, the following four conditions are required to be satisfied by
367
+ S:
368
+
369
+ - if A and B are both `memory_order::seq_cst` operations, then A
370
+ precedes B in S; and
371
+ - if A is a `memory_order::seq_cst` operation and B happens before a
372
+ `memory_order::seq_cst` fence Y, then A precedes Y in S; and
373
+ - if a `memory_order::seq_cst` fence X happens before A and B is a
374
+ `memory_order::seq_cst` operation, then X precedes B in S; and
375
+ - if a `memory_order::seq_cst` fence X happens before A and B happens
376
+ before a `memory_order::seq_cst` fence Y, then X precedes Y in S.
377
+
378
+ [*Note 3*: This definition ensures that S is consistent with the
379
+ modification order of any atomic object M. It also ensures that a
380
+ `memory_order::seq_cst` load A of M gets its value either from the last
381
+ modification of M that precedes A in S or from some
382
+ non-`memory_order::seq_cst` modification of M that does not happen
383
+ before any modification of M that precedes A in S. — *end note*]
384
+
385
+ [*Note 4*: We do not require that S be consistent with “happens before”
386
+ [[intro.races]]. This allows more efficient implementation of
387
+ `memory_order::acquire` and `memory_order::release` on some machine
388
+ architectures. It can produce surprising results when these are mixed
389
+ with `memory_order::seq_cst` accesses. — *end note*]
390
+
391
+ [*Note 5*: `memory_order::seq_cst` ensures sequential consistency only
392
  for a program that is free of data races and uses exclusively
393
+ `memory_order::seq_cst` atomic operations. Any use of weaker ordering
394
+ will invalidate this guarantee unless extreme care is used. In many
395
+ cases, `memory_order::seq_cst` atomic operations are reorderable with
396
+ respect to other atomic operations performed by the same
397
+ thread. — *end note*]
 
398
 
399
  Implementations should ensure that no “out-of-thin-air” values are
400
  computed that circularly depend on their own computation.
401
 
402
+ [*Note 6*:
403
 
404
  For example, with `x` and `y` initially zero,
405
 
406
  ``` cpp
407
  // Thread 1:
408
+ r1 = y.load(memory_order::relaxed);
409
+ x.store(r1, memory_order::relaxed);
410
  ```
411
 
412
  ``` cpp
413
  // Thread 2:
414
+ r2 = x.load(memory_order::relaxed);
415
+ y.store(r2, memory_order::relaxed);
416
  ```
417
 
418
  should not produce `r1 == r2 == 42`, since the store of 42 to `y` is
419
  only possible if the store to `x` stores `42`, which circularly depends
420
  on the store to `y` storing `42`. Note that without this restriction,
421
  such an execution is possible.
422
 
423
  — *end note*]
424
 
425
+ [*Note 7*:
426
 
427
  The recommendation similarly disallows `r1 == r2 == 42` in the following
428
  example, with `x` and `y` again initially zero:
429
 
430
  ``` cpp
431
  // Thread 1:
432
+ r1 = x.load(memory_order::relaxed);
433
+ if (r1 == 42) y.store(42, memory_order::relaxed);
434
  ```
435
 
436
  ``` cpp
437
  // Thread 2:
438
+ r2 = y.load(memory_order::relaxed);
439
+ if (r2 == 42) x.store(42, memory_order::relaxed);
440
  ```
441
 
442
  — *end note*]
443
 
444
  Atomic read-modify-write operations shall always read the last value (in
 
452
  template<class T>
453
  T kill_dependency(T y) noexcept;
454
  ```
455
 
456
  *Effects:* The argument does not carry a dependency to the return
457
+ value [[intro.multithread]].
458
 
459
  *Returns:* `y`.
460
 
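A non-normative sketch (an editorial illustration): `kill_dependency` marks the point past which the implementation need not preserve `memory_order::consume` dependency ordering.

``` cpp
#include <atomic>

int f(const std::atomic<int>& x) {
  int r = x.load(std::memory_order::consume);
  return std::kill_dependency(r);  // the dependency chain rooted at r ends here
}
```
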
461
  ## Lock-free property <a id="atomics.lockfree">[[atomics.lockfree]]</a>
462
 
463
  ``` cpp
464
  #define ATOMIC_BOOL_LOCK_FREE unspecified
465
  #define ATOMIC_CHAR_LOCK_FREE unspecified
466
+ #define ATOMIC_CHAR8_T_LOCK_FREE unspecified
467
  #define ATOMIC_CHAR16_T_LOCK_FREE unspecified
468
  #define ATOMIC_CHAR32_T_LOCK_FREE unspecified
469
  #define ATOMIC_WCHAR_T_LOCK_FREE unspecified
470
  #define ATOMIC_SHORT_LOCK_FREE unspecified
471
  #define ATOMIC_INT_LOCK_FREE unspecified
 
480
  (partial) specializations of the `atomic` template. A value of 0
481
  indicates that the types are never lock-free. A value of 1 indicates
482
  that the types are sometimes lock-free. A value of 2 indicates that the
483
  types are always lock-free.
484
 
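A non-normative sketch (an editorial illustration) contrasting the compile-time macro with the per-object query described below:

``` cpp
#include <atomic>
#include <cstdio>

int main() {
  // The macro reports 0 (never), 1 (sometimes), or 2 (always) lock-free.
  std::printf("ATOMIC_INT_LOCK_FREE = %d\n", ATOMIC_INT_LOCK_FREE);

  // The run-time query must give the same answer for every object of the type.
  std::atomic<long long> x{0};
  std::printf("x.is_lock_free() = %s\n", x.is_lock_free() ? "true" : "false");
}
```
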
485
+ At least one signed integral specialization of the `atomic` template,
486
+ along with the specialization for the corresponding unsigned type
487
+ [[basic.fundamental]], is always lock-free.
488
+
489
+ [*Note 1*: This requirement is optional in freestanding implementations
490
+ [[compliance]]. — *end note*]
491
+
492
+ The function `atomic_is_lock_free` [[atomics.types.operations]]
493
  indicates whether the object is lock-free. In any given program
494
  execution, the result of the lock-free query shall be consistent for all
495
  pointers of the same type.
496
 
497
  Atomic operations that are not lock-free are considered to potentially
498
+ block [[intro.progress]].
499
 
500
+ [*Note 2*: Operations that are lock-free should also be address-free.
501
  That is, atomic operations on the same memory location via two different
502
  addresses will communicate atomically. The implementation should not
503
  depend on any per-process state. This restriction enables communication
504
  by memory that is mapped into a process more than once and by memory
505
  that is shared between two processes. — *end note*]
506
 
507
+ ## Waiting and notifying <a id="atomics.wait">[[atomics.wait]]</a>
508
+
509
+ *Atomic waiting operations* and *atomic notifying operations* provide a
510
+ mechanism to wait for the value of an atomic object to change more
511
+ efficiently than can be achieved with polling. An atomic waiting
512
+ operation may block until it is unblocked by an atomic notifying
513
+ operation, according to each function’s effects.
514
+
515
+ [*Note 1*: Programs are not guaranteed to observe transient atomic
516
+ values, an issue known as the A-B-A problem, resulting in continued
517
+ blocking if a condition is only temporarily met. — *end note*]
518
+
519
+ [*Note 2*:
520
+
521
+ The following functions are atomic waiting operations:
522
+
523
+ - `atomic<T>::wait`,
524
+ - `atomic_flag::wait`,
525
+ - `atomic_wait` and `atomic_wait_explicit`,
526
+ - `atomic_flag_wait` and `atomic_flag_wait_explicit`, and
527
+ - `atomic_ref<T>::wait`.
528
+
529
+ — *end note*]
530
+
531
+ [*Note 3*:
532
+
533
+ The following functions are atomic notifying operations:
534
+
535
+ - `atomic<T>::notify_one` and `atomic<T>::notify_all`,
536
+ - `atomic_flag::notify_one` and `atomic_flag::notify_all`,
537
+ - `atomic_notify_one` and `atomic_notify_all`,
538
+ - `atomic_flag_notify_one` and `atomic_flag_notify_all`, and
539
+ - `atomic_ref<T>::notify_one` and `atomic_ref<T>::notify_all`.
540
+
541
+ — *end note*]
542
+
543
+ A call to an atomic waiting operation on an atomic object `M` is
544
+ *eligible to be unblocked* by a call to an atomic notifying operation on
545
+ `M` if there exist side effects `X` and `Y` on `M` such that:
546
+
547
+ - the atomic waiting operation has blocked after observing the result of
548
+ `X`,
549
+ - `X` precedes `Y` in the modification order of `M`, and
550
+ - `Y` happens before the call to the atomic notifying operation.
551
+
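A non-normative sketch (an editorial illustration) of the intended use: the waiting thread blocks while the value is unchanged, and becomes eligible to be unblocked once another thread modifies the object and performs a notifying operation.

``` cpp
#include <atomic>
#include <thread>

std::atomic<int> state{0};

void waiter() {
  state.wait(0);       // blocks while the value representation still equals 0
  // here state has been observed to differ from 0
}

void signaler() {
  state.store(1);      // the modification of state ...
  state.notify_one();  // ... happens before this notifying operation
}

int main() {
  std::thread t1(waiter), t2(signaler);
  t1.join();
  t2.join();
}
```
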
552
+ ## Class template `atomic_ref` <a id="atomics.ref.generic">[[atomics.ref.generic]]</a>
553
+
554
+ ``` cpp
555
+ namespace std {
556
+ template<class T> struct atomic_ref {
557
+ private:
558
+ T* ptr; // exposition only
559
+ public:
560
+ using value_type = T;
561
+ static constexpr size_t required_alignment = implementation-defined; // required alignment for atomic_ref type's operations
562
+
563
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic_ref type's operations are always lock free
564
+ bool is_lock_free() const noexcept;
565
+
566
+ explicit atomic_ref(T&);
567
+ atomic_ref(const atomic_ref&) noexcept;
568
+ atomic_ref& operator=(const atomic_ref&) = delete;
569
+
570
+ void store(T, memory_order = memory_order::seq_cst) const noexcept;
571
+ T operator=(T) const noexcept;
572
+ T load(memory_order = memory_order::seq_cst) const noexcept;
573
+ operator T() const noexcept;
574
+
575
+ T exchange(T, memory_order = memory_order::seq_cst) const noexcept;
576
+ bool compare_exchange_weak(T&, T,
577
+ memory_order, memory_order) const noexcept;
578
+ bool compare_exchange_strong(T&, T,
579
+ memory_order, memory_order) const noexcept;
580
+ bool compare_exchange_weak(T&, T,
581
+ memory_order = memory_order::seq_cst) const noexcept;
582
+ bool compare_exchange_strong(T&, T,
583
+ memory_order = memory_order::seq_cst) const noexcept;
584
+
585
+ void wait(T, memory_order = memory_order::seq_cst) const noexcept;
586
+ void notify_one() const noexcept;
587
+ void notify_all() const noexcept;
588
+ };
589
+ }
590
+ ```
591
+
592
+ An `atomic_ref` object applies atomic operations [[atomics.general]] to
593
+ the object referenced by `*ptr` such that, for the lifetime
594
+ [[basic.life]] of the `atomic_ref` object, the object referenced by
595
+ `*ptr` is an atomic object [[intro.races]].
596
+
597
+ The program is ill-formed if `is_trivially_copyable_v<T>` is `false`.
598
+
599
+ The lifetime [[basic.life]] of an object referenced by `*ptr` shall
600
+ exceed the lifetime of all `atomic_ref`s that reference the object.
601
+ While any `atomic_ref` instances exist that reference the `*ptr` object,
602
+ all accesses to that object shall exclusively occur through those
603
+ `atomic_ref` instances. No subobject of the object referenced by
604
+ `atomic_ref` shall be concurrently referenced by any other `atomic_ref`
605
+ object.
606
+
607
+ Atomic operations applied to an object through a referencing
608
+ `atomic_ref` are atomic with respect to atomic operations applied
609
+ through any other `atomic_ref` referencing the same object.
610
+
611
+ [*Note 1*: Atomic operations or the `atomic_ref` constructor could
612
+ acquire a shared resource, such as a lock associated with the referenced
613
+ object, to enable atomic operations to be applied to the referenced
614
+ object. — *end note*]
615
+
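A non-normative sketch (an editorial illustration): atomic operations applied to an ordinary object for a bounded region of its lifetime, respecting the exclusivity rule above, and assuming the object satisfies `required_alignment`.

``` cpp
#include <atomic>
#include <thread>

int counter = 0;  // an ordinary, non-atomic object

void bump(int n) {
  std::atomic_ref<int> ref{counter};  // all concurrent access goes through atomic_refs
  for (int i = 0; i < n; ++i)
    ref.fetch_add(1, std::memory_order::relaxed);
}

int main() {
  {
    std::thread t1(bump, 1000), t2(bump, 1000);
    t1.join();
    t2.join();
  }
  // No atomic_refs remain, so plain (non-atomic) access is permitted again.
  return counter == 2000 ? 0 : 1;
}
```
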
616
+ ### Operations <a id="atomics.ref.ops">[[atomics.ref.ops]]</a>
617
+
618
+ ``` cpp
619
+ static constexpr size_t required_alignment;
620
+ ```
621
+
622
+ The alignment required for an object to be referenced by an atomic
623
+ reference, which is at least `alignof(T)`.
624
+
625
+ [*Note 1*: Hardware could require an object referenced by an
626
+ `atomic_ref` to have stricter alignment [[basic.align]] than other
627
+ objects of type `T`. Further, whether operations on an `atomic_ref` are
628
+ lock-free could depend on the alignment of the referenced object. For
629
+ example, lock-free operations on `std::complex<double>` could be
630
+ supported only if aligned to `2*alignof(double)`. — *end note*]
631
+
632
+ ``` cpp
633
+ static constexpr bool is_always_lock_free;
634
+ ```
635
+
636
+ The static data member `is_always_lock_free` is `true` if the
637
+ `atomic_ref` type’s operations are always lock-free, and `false`
638
+ otherwise.
639
+
640
+ ``` cpp
641
+ bool is_lock_free() const noexcept;
642
+ ```
643
+
644
+ *Returns:* `true` if operations on all objects of the type
645
+ `atomic_ref<T>` are lock-free, `false` otherwise.
646
+
647
+ ``` cpp
648
+ atomic_ref(T& obj);
649
+ ```
650
+
651
+ *Preconditions:* The referenced object is aligned to
652
+ `required_alignment`.
653
+
654
+ *Ensures:* `*this` references `obj`.
655
+
656
+ *Throws:* Nothing.
657
+
658
+ ``` cpp
659
+ atomic_ref(const atomic_ref& ref) noexcept;
660
+ ```
661
+
662
+ *Ensures:* `*this` references the object referenced by `ref`.
663
+
664
+ ``` cpp
665
+ void store(T desired, memory_order order = memory_order::seq_cst) const noexcept;
666
+ ```
667
+
668
+ *Preconditions:* The `order` argument is neither
669
+ `memory_order::consume`, `memory_order::acquire`, nor
670
+ `memory_order::acq_rel`.
671
+
672
+ *Effects:* Atomically replaces the value referenced by `*ptr` with the
673
+ value of `desired`. Memory is affected according to the value of
674
+ `order`.
675
+
676
+ ``` cpp
677
+ T operator=(T desired) const noexcept;
678
+ ```
679
+
680
+ *Effects:* Equivalent to:
681
+
682
+ ``` cpp
683
+ store(desired);
684
+ return desired;
685
+ ```
686
+
687
+ ``` cpp
688
+ T load(memory_order order = memory_order::seq_cst) const noexcept;
689
+ ```
690
+
691
+ *Preconditions:* The `order` argument is neither `memory_order::release`
692
+ nor `memory_order::acq_rel`.
693
+
694
+ *Effects:* Memory is affected according to the value of `order`.
695
+
696
+ *Returns:* Atomically returns the value referenced by `*ptr`.
697
+
698
+ ``` cpp
699
+ operator T() const noexcept;
700
+ ```
701
+
702
+ *Effects:* Equivalent to: `return load();`
703
+
704
+ ``` cpp
705
+ T exchange(T desired, memory_order order = memory_order::seq_cst) const noexcept;
706
+ ```
707
+
708
+ *Effects:* Atomically replaces the value referenced by `*ptr` with
709
+ `desired`. Memory is affected according to the value of `order`. This
710
+ operation is an atomic read-modify-write
711
+ operation [[intro.multithread]].
712
+
713
+ *Returns:* Atomically returns the value referenced by `*ptr` immediately
714
+ before the effects.
715
+
716
+ ``` cpp
717
+ bool compare_exchange_weak(T& expected, T desired,
718
+ memory_order success, memory_order failure) const noexcept;
719
+
720
+ bool compare_exchange_strong(T& expected, T desired,
721
+ memory_order success, memory_order failure) const noexcept;
722
+
723
+ bool compare_exchange_weak(T& expected, T desired,
724
+ memory_order order = memory_order::seq_cst) const noexcept;
725
+
726
+ bool compare_exchange_strong(T& expected, T desired,
727
+ memory_order order = memory_order::seq_cst) const noexcept;
728
+ ```
729
+
730
+ *Preconditions:* The `failure` argument is neither
731
+ `memory_order::release` nor `memory_order::acq_rel`.
732
+
733
+ *Effects:* Retrieves the value in `expected`. It then atomically
734
+ compares the value representation of the value referenced by `*ptr` for
735
+ equality with that previously retrieved from `expected`, and if `true`,
736
+ replaces the value referenced by `*ptr` with that in `desired`. If and
737
+ only if the comparison is `true`, memory is affected according to the
738
+ value of `success`, and if the comparison is `false`, memory is affected
739
+ according to the value of `failure`. When only one `memory_order`
740
+ argument is supplied, the value of `success` is `order`, and the value
741
+ of `failure` is `order` except that a value of `memory_order::acq_rel`
742
+ shall be replaced by the value `memory_order::acquire` and a value of
743
+ `memory_order::release` shall be replaced by the value
744
+ `memory_order::relaxed`. If and only if the comparison is `false` then,
745
+ after the atomic operation, the value in `expected` is replaced by the
746
+ value read from the value referenced by `*ptr` during the atomic
747
+ comparison. If the operation returns `true`, these operations are atomic
748
+ read-modify-write operations [[intro.races]] on the value referenced by
749
+ `*ptr`. Otherwise, these operations are atomic load operations on that
750
+ memory.
751
+
752
+ *Returns:* The result of the comparison.
753
+
754
+ *Remarks:* A weak compare-and-exchange operation may fail spuriously.
755
+ That is, even when the contents of memory referred to by `expected` and
756
+ `ptr` are equal, it may return `false` and store back to `expected` the
757
+ same memory contents that were originally there.
758
+
759
+ [*Note 2*: This spurious failure enables implementation of
760
+ compare-and-exchange on a broader class of machines, e.g., load-locked
761
+ store-conditional machines. A consequence of spurious failure is that
762
+ nearly all uses of weak compare-and-exchange will be in a loop. When a
763
+ compare-and-exchange is in a loop, the weak version will yield better
764
+ performance on some platforms. When a weak compare-and-exchange would
765
+ require a loop and a strong one would not, the strong one is
766
+ preferable. — *end note*]
767
+
768
+ ``` cpp
769
+ void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
770
+ ```
771
+
772
+ *Preconditions:* `order` is neither `memory_order::release` nor
773
+ `memory_order::acq_rel`.
774
+
775
+ *Effects:* Repeatedly performs the following steps, in order:
776
+
777
+ - Evaluates `load(order)` and compares its value representation for
778
+ equality against that of `old`.
779
+ - If they compare unequal, returns.
780
+ - Blocks until it is unblocked by an atomic notifying operation or is
781
+ unblocked spuriously.
782
+
783
+ *Remarks:* This function is an atomic waiting operation [[atomics.wait]]
784
+ on atomic object `*ptr`.
785
+
786
+ ``` cpp
787
+ void notify_one() const noexcept;
788
+ ```
789
+
790
+ *Effects:* Unblocks the execution of at least one atomic waiting
791
+ operation on `*ptr` that is eligible to be unblocked [[atomics.wait]] by
792
+ this call, if any such atomic waiting operations exist.
793
+
794
+ *Remarks:* This function is an atomic notifying
795
+ operation [[atomics.wait]] on atomic object `*ptr`.
796
+
797
+ ``` cpp
798
+ void notify_all() const noexcept;
799
+ ```
800
+
801
+ *Effects:* Unblocks the execution of all atomic waiting operations on
802
+ `*ptr` that are eligible to be unblocked [[atomics.wait]] by this call.
803
+
804
+ *Remarks:* This function is an atomic notifying
805
+ operation [[atomics.wait]] on atomic object `*ptr`.
806
+
807
+ ### Specializations for integral types <a id="atomics.ref.int">[[atomics.ref.int]]</a>
808
+
809
+ There are specializations of the `atomic_ref` class template for the
810
+ integral types `char`, `signed char`, `unsigned char`, `short`,
811
+ `unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
812
+ `long long`, `unsigned long long`, `char8_t`, `char16_t`, `char32_t`,
813
+ `wchar_t`, and any other types needed by the typedefs in the header
814
+ `<cstdint>`. For each such type `integral`, the specialization
815
+ `atomic_ref<integral>` provides additional atomic operations appropriate
816
+ to integral types.
817
+
818
+ [*Note 1*: The specialization `atomic_ref<bool>` uses the primary
819
+ template [[atomics.ref.generic]]. — *end note*]
820
+
821
+ ``` cpp
822
+ namespace std {
823
+ template<> struct atomic_ref<integral> {
824
+ private:
825
+ integral* ptr; // exposition only
826
+ public:
827
+ using value_type = integral;
828
+ using difference_type = value_type;
829
+ static constexpr size_t required_alignment = implementation-defined; // required alignment for atomic_ref type's operations
830
+
831
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic_ref type's operations are always lock free
832
+ bool is_lock_free() const noexcept;
833
+
834
+ explicit atomic_ref(integral&);
835
+ atomic_ref(const atomic_ref&) noexcept;
836
+ atomic_ref& operator=(const atomic_ref&) = delete;
837
+
838
+ void store(integral, memory_order = memory_order::seq_cst) const noexcept;
839
+ integral operator=(integral) const noexcept;
840
+ integral load(memory_order = memory_order::seq_cst) const noexcept;
841
+ operator integral() const noexcept;
842
+
843
+ integral exchange(integral,
844
+ memory_order = memory_order::seq_cst) const noexcept;
845
+ bool compare_exchange_weak(integral&, integral,
846
+ memory_order, memory_order) const noexcept;
847
+ bool compare_exchange_strong(integral&, integral,
848
+ memory_order, memory_order) const noexcept;
849
+ bool compare_exchange_weak(integral&, integral,
850
+ memory_order = memory_order::seq_cst) const noexcept;
851
+ bool compare_exchange_strong(integral&, integral,
852
+ memory_order = memory_order::seq_cst) const noexcept;
853
+
854
+ integral fetch_add(integral,
855
+ memory_order = memory_order::seq_cst) const noexcept;
856
+ integral fetch_sub(integral,
857
+ memory_order = memory_order::seq_cst) const noexcept;
858
+ integral fetch_and(integral,
859
+ memory_order = memory_order::seq_cst) const noexcept;
860
+ integral fetch_or(integral,
861
+ memory_order = memory_order::seq_cst) const noexcept;
862
+ integral fetch_xor(integral,
863
+ memory_order = memory_order::seq_cst) const noexcept;
864
+
865
+ integral operator++(int) const noexcept;
866
+ integral operator--(int) const noexcept;
867
+ integral operator++() const noexcept;
868
+ integral operator--() const noexcept;
869
+ integral operator+=(integral) const noexcept;
870
+ integral operator-=(integral) const noexcept;
871
+ integral operator&=(integral) const noexcept;
872
+ integral operator|=(integral) const noexcept;
873
+ integral operator^=(integral) const noexcept;
874
+
875
+ void wait(integral, memory_order = memory_order::seq_cst) const noexcept;
876
+ void notify_one() const noexcept;
877
+ void notify_all() const noexcept;
878
+ };
879
+ }
880
+ ```
881
+
882
+ Descriptions are provided below only for members that differ from the
883
+ primary template.
884
+
885
+ The following operations perform arithmetic computations. The key,
886
+ operator, and computation correspondence is identified in
887
+ [[atomic.types.int.comp]].
888
+
889
+ ``` cpp
890
+ integral fetch_key(integral operand, memory_order order = memory_order::seq_cst) const noexcept;
891
+ ```
892
+
893
+ *Effects:* Atomically replaces the value referenced by `*ptr` with the
894
+ result of the computation applied to the value referenced by `*ptr` and
895
+ the given operand. Memory is affected according to the value of `order`.
896
+ These operations are atomic read-modify-write
897
+ operations [[intro.races]].
898
+
899
+ *Returns:* Atomically, the value referenced by `*ptr` immediately before
900
+ the effects.
901
+
902
+ *Remarks:* For signed integer types, the result is as if the object
903
+ value and parameters were converted to their corresponding unsigned
904
+ types, the computation performed on those types, and the result
905
+ converted back to the signed type.
906
+
907
+ [*Note 1*: There are no undefined results arising from the
908
+ computation. — *end note*]
909
+
910
+ ``` cpp
911
+ integral operator op=(integral operand) const noexcept;
912
+ ```
913
+
914
+ *Effects:* Equivalent to:
915
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
916
+
917
+ ### Specializations for floating-point types <a id="atomics.ref.float">[[atomics.ref.float]]</a>
918
+
919
+ There are specializations of the `atomic_ref` class template for the
920
+ floating-point types `float`, `double`, and `long double`. For each such
921
+ type `floating-point`, the specialization `atomic_ref<floating-point>`
922
+ provides additional atomic operations appropriate to floating-point
923
+ types.
924
+
925
+ ``` cpp
926
+ namespace std {
927
+ template<> struct atomic_ref<floating-point> {
928
+ private:
929
+ floating-point* ptr; // exposition only
930
+ public:
931
+ using value_type = floating-point;
932
+ using difference_type = value_type;
933
+ static constexpr size_t required_alignment = implementation-defined; // required alignment for atomic_ref type's operations
934
+
935
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic_ref type's operations are always lock free
936
+ bool is_lock_free() const noexcept;
937
+
938
+ explicit atomic_ref(floating-point&);
939
+ atomic_ref(const atomic_ref&) noexcept;
940
+ atomic_ref& operator=(const atomic_ref&) = delete;
941
+
942
+ void store(floating-point, memory_order = memory_order::seq_cst) const noexcept;
943
+ floating-point operator=(floating-point) const noexcept;
944
+ floating-point load(memory_order = memory_order::seq_cst) const noexcept;
945
+ operator floating-point() const noexcept;
946
+
947
+ floating-point exchange(floating-point,
948
+ memory_order = memory_order::seq_cst) const noexcept;
949
+ bool compare_exchange_weak(floating-point&, floating-point,
950
+ memory_order, memory_order) const noexcept;
951
+ bool compare_exchange_strong(floating-point&, floating-point,
952
+ memory_order, memory_order) const noexcept;
953
+ bool compare_exchange_weak(floating-point&, floating-point,
954
+ memory_order = memory_order::seq_cst) const noexcept;
955
+ bool compare_exchange_strong(floating-point&, floating-point,
956
+ memory_order = memory_order::seq_cst) const noexcept;
957
+
958
+ floating-point fetch_add(floating-point,
959
+ memory_order = memory_order::seq_cst) const noexcept;
960
+ floating-point fetch_sub(floating-point,
961
+ memory_order = memory_order::seq_cst) const noexcept;
962
+
963
+ floating-point operator+=(floating-point) const noexcept;
964
+ floating-point operator-=(floating-point) const noexcept;
965
+
966
+ void wait(floating-point, memory_order = memory_order::seq_cst) const noexcept;
967
+ void notify_one() const noexcept;
968
+ void notify_all() const noexcept;
969
+ };
970
+ }
971
+ ```
972
+
973
+ Descriptions are provided below only for members that differ from the
974
+ primary template.
975
+
976
+ The following operations perform arithmetic computations. The key,
977
+ operator, and computation correspondence is identified in
978
+ [[atomic.types.int.comp]].
979
+
980
+ ``` cpp
981
+ floating-point fetch_key(floating-point operand,
982
+ memory_order order = memory_order::seq_cst) const noexcept;
983
+ ```
984
+
985
+ *Effects:* Atomically replaces the value referenced by `*ptr` with the
986
+ result of the computation applied to the value referenced by `*ptr` and
987
+ the given operand. Memory is affected according to the value of `order`.
988
+ These operations are atomic read-modify-write
989
+ operations [[intro.races]].
990
+
991
+ *Returns:* Atomically, the value referenced by `*ptr` immediately before
992
+ the effects.
993
+
994
+ *Remarks:* If the result is not a representable value for its
995
+ type [[expr.pre]], the result is unspecified, but the operations
996
+ otherwise have no undefined behavior. Atomic arithmetic operations on
997
+ *`floating-point`* should conform to the
998
+ `std::numeric_limits<`*`floating-point`*`>` traits associated with the
999
+ floating-point type [[limits.syn]]. The floating-point
1000
+ environment [[cfenv]] for atomic arithmetic operations on
1001
+ *`floating-point`* may be different than the calling thread’s
1002
+ floating-point environment.
1003
+
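A non-normative sketch (an editorial illustration) of a floating-point read-modify-write through `atomic_ref`, subject to the rounding remarks above and assuming the object satisfies `required_alignment`:

``` cpp
#include <atomic>
#include <vector>

double sum = 0.0;  // ordinary object, updated atomically below

void accumulate(const std::vector<double>& chunk) {
  std::atomic_ref<double> s{sum};
  for (double x : chunk)
    s.fetch_add(x);  // atomic read-modify-write; result rounded as described above
}
```
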
1004
+ ``` cpp
1005
+ floating-point operator op=(floating-point operand) const noexcept;
1006
+ ```
1007
+
1008
+ *Effects:* Equivalent to:
1009
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
1010
+
1011
+ ### Partial specialization for pointers <a id="atomics.ref.pointer">[[atomics.ref.pointer]]</a>
1012
+
1013
+ ``` cpp
1014
+ namespace std {
1015
+ template<class T> struct atomic_ref<T*> {
1016
+ private:
1017
+ T** ptr; // exposition only
1018
+ public:
1019
+ using value_type = T*;
1020
+ using difference_type = ptrdiff_t;
1021
+ static constexpr size_t required_alignment = implementation-defined; // required alignment for atomic_ref type's operations
1022
+
1023
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic_ref type's operations are always lock free
1024
+ bool is_lock_free() const noexcept;
1025
+
1026
+ explicit atomic_ref(T*&);
1027
+ atomic_ref(const atomic_ref&) noexcept;
1028
+ atomic_ref& operator=(const atomic_ref&) = delete;
1029
+
1030
+ void store(T*, memory_order = memory_order::seq_cst) const noexcept;
1031
+ T* operator=(T*) const noexcept;
1032
+ T* load(memory_order = memory_order::seq_cst) const noexcept;
1033
+ operator T*() const noexcept;
1034
+
1035
+ T* exchange(T*, memory_order = memory_order::seq_cst) const noexcept;
1036
+ bool compare_exchange_weak(T*&, T*,
1037
+ memory_order, memory_order) const noexcept;
1038
+ bool compare_exchange_strong(T*&, T*,
1039
+ memory_order, memory_order) const noexcept;
1040
+ bool compare_exchange_weak(T*&, T*,
1041
+ memory_order = memory_order::seq_cst) const noexcept;
1042
+ bool compare_exchange_strong(T*&, T*,
1043
+ memory_order = memory_order::seq_cst) const noexcept;
1044
+
1045
+ T* fetch_add(difference_type, memory_order = memory_order::seq_cst) const noexcept;
1046
+ T* fetch_sub(difference_type, memory_order = memory_order::seq_cst) const noexcept;
1047
+
1048
+ T* operator++(int) const noexcept;
1049
+ T* operator--(int) const noexcept;
1050
+ T* operator++() const noexcept;
1051
+ T* operator--() const noexcept;
1052
+ T* operator+=(difference_type) const noexcept;
1053
+ T* operator-=(difference_type) const noexcept;
1054
+
1055
+ void wait(T*, memory_order = memory_order::seq_cst) const noexcept;
1056
+ void notify_one() const noexcept;
1057
+ void notify_all() const noexcept;
1058
+ };
1059
+ }
1060
+ ```
1061
+
1062
+ Descriptions are provided below only for members that differ from the
1063
+ primary template.
1064
+
1065
+ The following operations perform arithmetic computations. The key,
1066
+ operator, and computation correspondence is identified in
1067
+ [[atomic.types.pointer.comp]].
1068
+
1069
+ ``` cpp
1070
+ T* fetch_key(difference_type operand, memory_order order = memory_order::seq_cst) const noexcept;
1071
+ ```
1072
+
1073
+ *Mandates:* `T` is a complete object type.
1074
+
1075
+ *Effects:* Atomically replaces the value referenced by `*ptr` with the
1076
+ result of the computation applied to the value referenced by `*ptr` and
1077
+ the given operand. Memory is affected according to the value of `order`.
1078
+ These operations are atomic read-modify-write
1079
+ operations [[intro.races]].
1080
+
1081
+ *Returns:* Atomically, the value referenced by `*ptr` immediately before
1082
+ the effects.
1083
+
1084
+ *Remarks:* The result may be an undefined address, but the operations
1085
+ otherwise have no undefined behavior.
1086
+
1087
+ ``` cpp
1088
+ T* operator op=(difference_type operand) const noexcept;
1089
+ ```
1090
+
1091
+ *Effects:* Equivalent to:
1092
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
1093
+
1094
+ ### Member operators common to integers and pointers to objects <a id="atomics.ref.memop">[[atomics.ref.memop]]</a>
1095
+
1096
+ ``` cpp
1097
+ value_type operator++(int) const noexcept;
1098
+ ```
1099
+
1100
+ *Effects:* Equivalent to: `return fetch_add(1);`
1101
+
1102
+ ``` cpp
1103
+ value_type operator--(int) const noexcept;
1104
+ ```
1105
+
1106
+ *Effects:* Equivalent to: `return fetch_sub(1);`
1107
+
1108
+ ``` cpp
1109
+ value_type operator++() const noexcept;
1110
+ ```
1111
+
1112
+ *Effects:* Equivalent to: `return fetch_add(1) + 1;`
1113
+
1114
+ ``` cpp
1115
+ value_type operator--() const noexcept;
1116
+ ```
1117
+
1118
+ *Effects:* Equivalent to: `return fetch_sub(1) - 1;`
1119
+
1120
  ## Class template `atomic` <a id="atomics.types.generic">[[atomics.types.generic]]</a>
1121
 
1122
  ``` cpp
1123
  namespace std {
1124
  template<class T> struct atomic {
1125
  using value_type = T;
1126
+
1127
  static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
1128
  bool is_lock_free() const volatile noexcept;
1129
  bool is_lock_free() const noexcept;
1130
+
1131
+ // [atomics.types.operations], operations on atomic types
1132
+ constexpr atomic() noexcept(is_nothrow_default_constructible_v<T>);
1133
+ constexpr atomic(T) noexcept;
1134
+ atomic(const atomic&) = delete;
1135
+ atomic& operator=(const atomic&) = delete;
1136
+ atomic& operator=(const atomic&) volatile = delete;
1137
+
1138
+ T load(memory_order = memory_order::seq_cst) const volatile noexcept;
1139
+ T load(memory_order = memory_order::seq_cst) const noexcept;
1140
  operator T() const volatile noexcept;
1141
  operator T() const noexcept;
1142
+ void store(T, memory_order = memory_order::seq_cst) volatile noexcept;
1143
+ void store(T, memory_order = memory_order::seq_cst) noexcept;
1144
+ T operator=(T) volatile noexcept;
1145
+ T operator=(T) noexcept;
1146
+
1147
+ T exchange(T, memory_order = memory_order::seq_cst) volatile noexcept;
1148
+ T exchange(T, memory_order = memory_order::seq_cst) noexcept;
1149
  bool compare_exchange_weak(T&, T, memory_order, memory_order) volatile noexcept;
1150
  bool compare_exchange_weak(T&, T, memory_order, memory_order) noexcept;
1151
  bool compare_exchange_strong(T&, T, memory_order, memory_order) volatile noexcept;
1152
  bool compare_exchange_strong(T&, T, memory_order, memory_order) noexcept;
1153
+ bool compare_exchange_weak(T&, T, memory_order = memory_order::seq_cst) volatile noexcept;
1154
+ bool compare_exchange_weak(T&, T, memory_order = memory_order::seq_cst) noexcept;
1155
+ bool compare_exchange_strong(T&, T, memory_order = memory_order::seq_cst) volatile noexcept;
1156
+ bool compare_exchange_strong(T&, T, memory_order = memory_order::seq_cst) noexcept;
1157
 
1158
+ void wait(T, memory_order = memory_order::seq_cst) const volatile noexcept;
1159
+ void wait(T, memory_order = memory_order::seq_cst) const noexcept;
1160
+ void notify_one() volatile noexcept;
1161
+ void notify_one() noexcept;
1162
+ void notify_all() volatile noexcept;
1163
+ void notify_all() noexcept;
 
1164
  };
1165
  }
1166
  ```
1167
 
1168
+ The template argument for `T` shall meet the *Cpp17CopyConstructible*
1169
+ and *Cpp17CopyAssignable* requirements. The program is ill-formed if any
1170
+ of
1171
+
1172
+ - `is_trivially_copyable_v<T>`,
1173
+ - `is_copy_constructible_v<T>`,
1174
+ - `is_move_constructible_v<T>`,
1175
+ - `is_copy_assignable_v<T>`, or
1176
+ - `is_move_assignable_v<T>`
1177
+
1178
+ is `false`.
1179
 
1180
  [*Note 1*: Type arguments that are not also statically initializable
1181
  may be difficult to use. — *end note*]
1182
 
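A non-normative sketch (an editorial illustration) of a user-provided type that satisfies the requirements above:

``` cpp
#include <atomic>
#include <type_traits>

struct Pair {
  int a;
  int b;
};  // trivially copyable, copy/move constructible, copy/move assignable

static_assert(std::is_trivially_copyable_v<Pair>);
std::atomic<Pair> p{{1, 2}};  // well-formed: Pair meets the requirements above

// By contrast, std::atomic<std::string> would be ill-formed:
// std::string is not trivially copyable.
```
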
1183
  The specialization `atomic<bool>` is a standard-layout struct.
1184
 
1185
  [*Note 2*: The representation of an atomic specialization need not have
1186
+ the same size and alignment requirement as its corresponding argument
1187
+ type. — *end note*]
 
1188
 
1189
  ### Operations on atomic types <a id="atomics.types.operations">[[atomics.types.operations]]</a>
1190
 
 
 
 
 
 
 
1191
  ``` cpp
1192
+ constexpr atomic() noexcept(is_nothrow_default_constructible_v<T>);
1193
  ```
1194
 
1195
+ *Mandates:* `is_default_constructible_v<T>` is `true`.
1196
 
1197
+ *Effects:* Initializes the atomic object with the value of `T()`.
1198
+ Initialization is not an atomic operation [[intro.multithread]].
1199
 
1200
  ``` cpp
1201
  constexpr atomic(T desired) noexcept;
1202
  ```
1203
 
1204
  *Effects:* Initializes the object with the value `desired`.
1205
+ Initialization is not an atomic operation [[intro.multithread]].
1206
 
1207
+ [*Note 1*: It is possible to have an access to an atomic object `A`
1208
  race with its construction, for example by communicating the address of
1209
  the just-constructed object `A` to another thread via
1210
+ `memory_order::relaxed` operations on a suitable atomic pointer
1211
+ variable, and then immediately accessing `A` in the receiving thread.
1212
+ This results in undefined behavior. — *end note*]
 
  ``` cpp
1215
  static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
1216
  ```
1217
 
1218
  The `static` data member `is_always_lock_free` is `true` if the atomic
1219
  type’s operations are always lock-free, and `false` otherwise.
1220
 
1221
+ [*Note 2*: The value of `is_always_lock_free` is consistent with the
1222
  value of the corresponding `ATOMIC_..._LOCK_FREE` macro, if
1223
  defined. — *end note*]
1224
 
1225
  ``` cpp
1226
  bool is_lock_free() const volatile noexcept;
 
1228
  ```
1229
 
1230
  *Returns:* `true` if the object’s operations are lock-free, `false`
1231
  otherwise.
1232
 
1233
+ [*Note 3*: The return value of the `is_lock_free` member function is
1234
  consistent with the value of `is_always_lock_free` for the same
1235
  type. — *end note*]
1236
 
1237
  ``` cpp
1238
+ void store(T desired, memory_order order = memory_order::seq_cst) volatile noexcept;
1239
+ void store(T desired, memory_order order = memory_order::seq_cst) noexcept;
1240
  ```
1241
 
1242
+ *Preconditions:* The `order` argument is neither
1243
+ `memory_order::consume`, `memory_order::acquire`, nor
1244
+ `memory_order::acq_rel`.
1245
+
1246
+ *Constraints:* For the `volatile` overload of this function,
1247
+ `is_always_lock_free` is `true`.
1248
 
1249
  *Effects:* Atomically replaces the value pointed to by `this` with the
1250
  value of `desired`. Memory is affected according to the value of
1251
  `order`.
1252
 
1253
  ``` cpp
1254
  T operator=(T desired) volatile noexcept;
1255
  T operator=(T desired) noexcept;
1256
  ```
1257
 
1258
+ *Constraints:* For the `volatile` overload of this function,
1259
+ `is_always_lock_free` is `true`.
1260
+
1261
+ *Effects:* Equivalent to `store(desired)`.
1262
 
1263
  *Returns:* `desired`.
1264
 
1265
  ``` cpp
1266
+ T load(memory_order order = memory_order::seq_cst) const volatile noexcept;
1267
+ T load(memory_order order = memory_order::seq_cst) const noexcept;
1268
  ```
1269
 
1270
+ *Preconditions:* The `order` argument is neither `memory_order::release`
1271
+ nor `memory_order::acq_rel`.
1272
+
1273
+ *Constraints:* For the `volatile` overload of this function,
1274
+ `is_always_lock_free` is `true`.
1275
 
1276
  *Effects:* Memory is affected according to the value of `order`.
1277
 
1278
  *Returns:* Atomically returns the value pointed to by `this`.
1279
 
1280
  ``` cpp
1281
  operator T() const volatile noexcept;
1282
  operator T() const noexcept;
1283
  ```
1284
 
1285
+ *Constraints:* For the `volatile` overload of this function,
1286
+ `is_always_lock_free` is `true`.
1287
+
1288
  *Effects:* Equivalent to: `return load();`
1289
 
1290
  ``` cpp
1291
+ T exchange(T desired, memory_order order = memory_order::seq_cst) volatile noexcept;
1292
+ T exchange(T desired, memory_order order = memory_order::seq_cst) noexcept;
1293
  ```
1294
 
1295
+ *Constraints:* For the `volatile` overload of this function,
1296
+ `is_always_lock_free` is `true`.
1297
+
1298
  *Effects:* Atomically replaces the value pointed to by `this` with
1299
  `desired`. Memory is affected according to the value of `order`. These
1300
  operations are atomic read-modify-write
1301
+ operations [[intro.multithread]].
1302
 
1303
  *Returns:* Atomically returns the value pointed to by `this` immediately
1304
  before the effects.
1305
 
1306
  ``` cpp
 
1311
  bool compare_exchange_strong(T& expected, T desired,
1312
  memory_order success, memory_order failure) volatile noexcept;
1313
  bool compare_exchange_strong(T& expected, T desired,
1314
  memory_order success, memory_order failure) noexcept;
1315
  bool compare_exchange_weak(T& expected, T desired,
1316
+ memory_order order = memory_order::seq_cst) volatile noexcept;
1317
  bool compare_exchange_weak(T& expected, T desired,
1318
+ memory_order order = memory_order::seq_cst) noexcept;
1319
  bool compare_exchange_strong(T& expected, T desired,
1320
+ memory_order order = memory_order::seq_cst) volatile noexcept;
1321
  bool compare_exchange_strong(T& expected, T desired,
1322
+ memory_order order = memory_order::seq_cst) noexcept;
1323
  ```
1324
 
1325
+ *Preconditions:* The `failure` argument is neither
1326
+ `memory_order::release` nor `memory_order::acq_rel`.
1327
+
1328
+ *Constraints:* For the `volatile` overload of this function,
1329
+ `is_always_lock_free` is `true`.
1330
 
1331
  *Effects:* Retrieves the value in `expected`. It then atomically
1332
+ compares the value representation of the value pointed to by `this` for
1333
+ equality with that previously retrieved from `expected`, and if true,
1334
+ replaces the value pointed to by `this` with that in `desired`. If and
1335
+ only if the comparison is `true`, memory is affected according to the
1336
+ value of `success`, and if the comparison is false, memory is affected
1337
+ according to the value of `failure`. When only one `memory_order`
1338
+ argument is supplied, the value of `success` is `order`, and the value
1339
+ of `failure` is `order` except that a value of `memory_order::acq_rel`
1340
+ shall be replaced by the value `memory_order::acquire` and a value of
1341
+ `memory_order::release` shall be replaced by the value
1342
+ `memory_order::relaxed`. If and only if the comparison is false then,
1343
+ after the atomic operation, the value in `expected` is replaced by the
1344
+ value pointed to by `this` during the atomic comparison. If the
1345
+ operation returns `true`, these operations are atomic read-modify-write
1346
+ operations [[intro.multithread]] on the memory pointed to by `this`.
 
1347
  Otherwise, these operations are atomic load operations on that memory.
1348
 
1349
  *Returns:* The result of the comparison.
1350
 
1351
+ [*Note 4*:
1352
 
1353
+ For example, the effect of `compare_exchange_strong` on objects without
1354
+ padding bits [[basic.types]] is
1355
 
1356
  ``` cpp
1357
  if (memcmp(this, &expected, sizeof(*this)) == 0)
1358
  memcpy(this, &desired, sizeof(*this));
1359
  else
1360
  memcpy(&expected, this, sizeof(*this));
1361
  ```
1362
 
1363
  — *end note*]
1364
 
1365
+ [*Example 1*:
1366
 
1367
  The expected use of the compare-and-exchange operations is as follows.
1368
  The compare-and-exchange operations will update `expected` when another
1369
  iteration of the loop is needed.
1370
 
 
1375
  } while (!current.compare_exchange_weak(expected, desired));
1376
  ```
1377
 
1378
  — *end example*]
1379
 
1380
+ [*Example 2*:
1381
 
1382
  Because the expected value is updated only on failure, code releasing
1383
+ the memory containing the `expected` value on success will work. For
1384
+ example, list head insertion will act atomically and would not introduce
1385
+ a data race in the following code:
1386
 
1387
  ``` cpp
1388
  do {
1389
  p->next = head; // make new list node point to the current head
1390
  } while (!head.compare_exchange_weak(p->next, p)); // try to insert
 
1400
  *Remarks:* A weak compare-and-exchange operation may fail spuriously.
1401
  That is, even when the contents of memory referred to by `expected` and
1402
  `this` are equal, it may return `false` and store back to `expected` the
1403
  same memory contents that were originally there.
1404
 
1405
+ [*Note 5*: This spurious failure enables implementation of
1406
  compare-and-exchange on a broader class of machines, e.g., load-locked
1407
  store-conditional machines. A consequence of spurious failure is that
1408
  nearly all uses of weak compare-and-exchange will be in a loop. When a
1409
  compare-and-exchange is in a loop, the weak version will yield better
1410
  performance on some platforms. When a weak compare-and-exchange would
1411
  require a loop and a strong one would not, the strong one is
1412
  preferable. — *end note*]
1413
 
1414
+ [*Note 6*: Under cases where the `memcpy` and `memcmp` semantics of the
1415
+ compare-and-exchange operations apply, the outcome might be failed
1416
+ comparisons for values that compare equal with `operator==` if the value
1417
+ representation has trap bits or alternate representations of the same
1418
+ value. Notably, on implementations conforming to ISO/IEC/IEEE 60559,
1419
+ floating-point `-0.0` and `+0.0` will not compare equal with `memcmp`
1420
+ but will compare equal with `operator==`, and NaNs with the same payload
1421
+ will compare equal with `memcmp` but will not compare equal with
1422
+ `operator==`. — *end note*]
1423
+
1424
+ [*Note 7*:
1425
+
1426
+ Because compare-and-exchange acts on an object’s value representation,
1427
+ padding bits that never participate in the object’s value representation
1428
+ are ignored. As a consequence, the following code is guaranteed to avoid
1429
+ spurious failure:
1430
+
1431
+ ``` cpp
1432
+ struct padded {
1433
+ char clank = 0x42;
1434
+ // Padding here.
1435
+ unsigned biff = 0xC0DEFEFE;
1436
+ };
1437
+ atomic<padded> pad = {};
1438
+
1439
+ bool zap() {
1440
+ padded expected, desired{0, 0};
1441
+ return pad.compare_exchange_strong(expected, desired);
1442
+ }
1443
+ ```
1444
+
1445
+ — *end note*]
1446
+
1447
+ [*Note 8*:
1448
+
1449
+ For a union with bits that participate in the value representation of
1450
+ some members but not others, compare-and-exchange might always fail.
1451
+ This is because such padding bits have an indeterminate value when they
1452
+ do not participate in the value representation of the active member. As
1453
+ a consequence, the following code is not guaranteed to ever succeed:
1454
+
1455
+ ``` cpp
1456
+ union pony {
1457
+ double celestia = 0.;
1458
+ short luna; // padded
1459
+ };
1460
+ atomic<pony> princesses = {};
1461
+
1462
+ bool party(pony desired) {
1463
+ pony expected;
1464
+ return princesses.compare_exchange_strong(expected, desired);
1465
+ }
1466
+ ```
1467
+
1468
+ — *end note*]
1469
+
1470
+ ``` cpp
1471
+ void wait(T old, memory_order order = memory_order::seq_cst) const volatile noexcept;
1472
+ void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
1473
+ ```
1474
+
1475
+ *Preconditions:* `order` is neither `memory_order::release` nor
1476
+ `memory_order::acq_rel`.
1477
+
1478
+ *Effects:* Repeatedly performs the following steps, in order:
1479
+
1480
+ - Evaluates `load(order)` and compares its value representation for
1481
+ equality against that of `old`.
1482
+ - If they compare unequal, returns.
1483
+ - Blocks until it is unblocked by an atomic notifying operation or is
1484
+ unblocked spuriously.
1485
+
1486
+ *Remarks:* This function is an atomic waiting
1487
+ operation [[atomics.wait]].
1488
+
1489
+ ``` cpp
1490
+ void notify_one() volatile noexcept;
1491
+ void notify_one() noexcept;
1492
+ ```
1493
+
1494
+ *Effects:* Unblocks the execution of at least one atomic waiting
1495
+ operation that is eligible to be unblocked [[atomics.wait]] by this
1496
+ call, if any such atomic waiting operations exist.
1497
+
1498
+ *Remarks:* This function is an atomic notifying
1499
+ operation [[atomics.wait]].
1500
+
1501
+ ``` cpp
1502
+ void notify_all() volatile noexcept;
1503
+ void notify_all() noexcept;
1504
+ ```
1505
+
1506
+ *Effects:* Unblocks the execution of all atomic waiting operations that
1507
+ are eligible to be unblocked [[atomics.wait]] by this call.
1508
+
1509
+ *Remarks:* This function is an atomic notifying
1510
+ operation [[atomics.wait]].
1511
 
1512
  ### Specializations for integers <a id="atomics.types.int">[[atomics.types.int]]</a>
1513
 
1514
+ There are specializations of the `atomic` class template for the
1515
+ integral types `char`, `signed char`, `unsigned char`, `short`,
1516
+ `unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
1517
+ `long long`, `unsigned long long`, `char8_t`, `char16_t`, `char32_t`,
1518
+ `wchar_t`, and any other types needed by the typedefs in the header
1519
+ `<cstdint>`. For each such type `integral`, the specialization
1520
+ `atomic<integral>` provides additional atomic operations appropriate to
1521
+ integral types.
1522
 
1523
+ [*Note 1*: The specialization `atomic<bool>` uses the primary template
1524
  [[atomics.types.generic]]. — *end note*]
1525
 
1526
  ``` cpp
1527
  namespace std {
1528
  template<> struct atomic<integral> {
1529
  using value_type = integral;
1530
  using difference_type = value_type;
1531
+
1532
  static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
1533
  bool is_lock_free() const volatile noexcept;
1534
  bool is_lock_free() const noexcept;
 
+ constexpr atomic() noexcept;
1537
  constexpr atomic(integral) noexcept;
1538
  atomic(const atomic&) = delete;
1539
  atomic& operator=(const atomic&) = delete;
1540
  atomic& operator=(const atomic&) volatile = delete;
1541
+
1542
+ void store(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1543
+ void store(integral, memory_order = memory_order::seq_cst) noexcept;
1544
  integral operator=(integral) volatile noexcept;
1545
  integral operator=(integral) noexcept;
1546
+ integral load(memory_order = memory_order::seq_cst) const volatile noexcept;
1547
+ integral load(memory_order = memory_order::seq_cst) const noexcept;
1548
+ operator integral() const volatile noexcept;
1549
+ operator integral() const noexcept;
1550
+
1551
+ integral exchange(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1552
+ integral exchange(integral, memory_order = memory_order::seq_cst) noexcept;
1553
+ bool compare_exchange_weak(integral&, integral,
1554
+ memory_order, memory_order) volatile noexcept;
1555
+ bool compare_exchange_weak(integral&, integral,
1556
+ memory_order, memory_order) noexcept;
1557
+ bool compare_exchange_strong(integral&, integral,
1558
+ memory_order, memory_order) volatile noexcept;
1559
+ bool compare_exchange_strong(integral&, integral,
1560
+ memory_order, memory_order) noexcept;
1561
+ bool compare_exchange_weak(integral&, integral,
1562
+ memory_order = memory_order::seq_cst) volatile noexcept;
1563
+ bool compare_exchange_weak(integral&, integral,
1564
+ memory_order = memory_order::seq_cst) noexcept;
1565
+ bool compare_exchange_strong(integral&, integral,
1566
+ memory_order = memory_order::seq_cst) volatile noexcept;
1567
+ bool compare_exchange_strong(integral&, integral,
1568
+ memory_order = memory_order::seq_cst) noexcept;
1569
+
1570
+ integral fetch_add(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1571
+ integral fetch_add(integral, memory_order = memory_order::seq_cst) noexcept;
1572
+ integral fetch_sub(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1573
+ integral fetch_sub(integral, memory_order = memory_order::seq_cst) noexcept;
1574
+ integral fetch_and(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1575
+ integral fetch_and(integral, memory_order = memory_order::seq_cst) noexcept;
1576
+ integral fetch_or(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1577
+ integral fetch_or(integral, memory_order = memory_order::seq_cst) noexcept;
1578
+ integral fetch_xor(integral, memory_order = memory_order::seq_cst) volatile noexcept;
1579
+ integral fetch_xor(integral, memory_order = memory_order::seq_cst) noexcept;
1580
 
1581
  integral operator++(int) volatile noexcept;
1582
  integral operator++(int) noexcept;
1583
  integral operator--(int) volatile noexcept;
1584
  integral operator--(int) noexcept;
 
1594
  integral operator&=(integral) noexcept;
1595
  integral operator|=(integral) volatile noexcept;
1596
  integral operator|=(integral) noexcept;
1597
  integral operator^=(integral) volatile noexcept;
1598
  integral operator^=(integral) noexcept;
1599
+
1600
+ void wait(integral, memory_order = memory_order::seq_cst) const volatile noexcept;
1601
+ void wait(integral, memory_order = memory_order::seq_cst) const noexcept;
1602
+ void notify_one() volatile noexcept;
1603
+ void notify_one() noexcept;
1604
+ void notify_all() volatile noexcept;
1605
+ void notify_all() noexcept;
1606
  };
1607
  }
1608
  ```
1609
 
1610
  The atomic integral specializations are standard-layout structs. They
1611
+ each have a trivial destructor.
1612
 
1613
  Descriptions are provided below only for members that differ from the
1614
  primary template.
1615
 
1616
  The following operations perform arithmetic computations. The key,
1617
  operator, and computation correspondence is:
1618
 
1619
+ **Table: Atomic arithmetic computations** <a id="atomic.types.int.comp">[atomic.types.int.comp]</a>
1620
 
1621
  | Key | Op | Computation | Key | Op | Computation |
1622
  | ----- | --- | -------------------- | ----- | --- | -------------------- |
1623
  | `add` | `+` | addition | `sub` | `-` | subtraction |
1624
  | `or` | `\|` | bitwise inclusive or | `xor` | `^` | bitwise exclusive or |
1625
  | `and` | `&` | bitwise and | | | |
1626
 
``` cpp
T fetch_key(T operand, memory_order order = memory_order::seq_cst) volatile noexcept;
T fetch_key(T operand, memory_order order = memory_order::seq_cst) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Atomically replaces the value pointed to by `this` with the
result of the computation applied to the value pointed to by `this` and
the given `operand`. Memory is affected according to the value of
`order`. These operations are atomic read-modify-write
operations [[intro.multithread]].

*Returns:* Atomically, the value pointed to by `this` immediately before
the effects.

*Remarks:* For signed integer types, the result is as if the object
value and parameters were converted to their corresponding unsigned
types, the computation performed on those types, and the result
converted back to the signed type.

[*Note 1*: There are no undefined results arising from the
computation. — *end note*]

``` cpp
T operator op=(T operand) volatile noexcept;
T operator op=(T operand) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to:
`return fetch_`*`key`*`(operand) `*`op`*` operand;`

### Specializations for floating-point types <a id="atomics.types.float">[[atomics.types.float]]</a>

There are specializations of the `atomic` class template for the
floating-point types `float`, `double`, and `long double`. For each such
type *`floating-point`*, the specialization `atomic<`*`floating-point`*`>`
provides additional atomic operations appropriate to floating-point
types.

``` cpp
namespace std {
  template<> struct atomic<floating-point> {
    using value_type = floating-point;
    using difference_type = value_type;

    static constexpr bool is_always_lock_free = implementation-defined;  // whether operations are always lock-free
    bool is_lock_free() const volatile noexcept;
    bool is_lock_free() const noexcept;

    constexpr atomic() noexcept;
    constexpr atomic(floating-point) noexcept;
    atomic(const atomic&) = delete;
    atomic& operator=(const atomic&) = delete;
    atomic& operator=(const atomic&) volatile = delete;

    void store(floating-point, memory_order = memory_order::seq_cst) volatile noexcept;
    void store(floating-point, memory_order = memory_order::seq_cst) noexcept;
    floating-point operator=(floating-point) volatile noexcept;
    floating-point operator=(floating-point) noexcept;
    floating-point load(memory_order = memory_order::seq_cst) const volatile noexcept;
    floating-point load(memory_order = memory_order::seq_cst) const noexcept;
    operator floating-point() const volatile noexcept;
    operator floating-point() const noexcept;

    floating-point exchange(floating-point,
                            memory_order = memory_order::seq_cst) volatile noexcept;
    floating-point exchange(floating-point,
                            memory_order = memory_order::seq_cst) noexcept;
    bool compare_exchange_weak(floating-point&, floating-point,
                               memory_order, memory_order) volatile noexcept;
    bool compare_exchange_weak(floating-point&, floating-point,
                               memory_order, memory_order) noexcept;
    bool compare_exchange_strong(floating-point&, floating-point,
                                 memory_order, memory_order) volatile noexcept;
    bool compare_exchange_strong(floating-point&, floating-point,
                                 memory_order, memory_order) noexcept;
    bool compare_exchange_weak(floating-point&, floating-point,
                               memory_order = memory_order::seq_cst) volatile noexcept;
    bool compare_exchange_weak(floating-point&, floating-point,
                               memory_order = memory_order::seq_cst) noexcept;
    bool compare_exchange_strong(floating-point&, floating-point,
                                 memory_order = memory_order::seq_cst) volatile noexcept;
    bool compare_exchange_strong(floating-point&, floating-point,
                                 memory_order = memory_order::seq_cst) noexcept;

    floating-point fetch_add(floating-point,
                             memory_order = memory_order::seq_cst) volatile noexcept;
    floating-point fetch_add(floating-point,
                             memory_order = memory_order::seq_cst) noexcept;
    floating-point fetch_sub(floating-point,
                             memory_order = memory_order::seq_cst) volatile noexcept;
    floating-point fetch_sub(floating-point,
                             memory_order = memory_order::seq_cst) noexcept;

    floating-point operator+=(floating-point) volatile noexcept;
    floating-point operator+=(floating-point) noexcept;
    floating-point operator-=(floating-point) volatile noexcept;
    floating-point operator-=(floating-point) noexcept;

    void wait(floating-point, memory_order = memory_order::seq_cst) const volatile noexcept;
    void wait(floating-point, memory_order = memory_order::seq_cst) const noexcept;
    void notify_one() volatile noexcept;
    void notify_one() noexcept;
    void notify_all() volatile noexcept;
    void notify_all() noexcept;
  };
}
```

The atomic floating-point specializations are standard-layout structs.
They each have a trivial destructor.

Descriptions are provided below only for members that differ from the
primary template.

The following operations perform arithmetic addition and subtraction
computations. The key, operator, and computation correspondence is
identified in [[atomic.types.int.comp]].

``` cpp
T fetch_key(T operand, memory_order order = memory_order::seq_cst) volatile noexcept;
T fetch_key(T operand, memory_order order = memory_order::seq_cst) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Atomically replaces the value pointed to by `this` with the
result of the computation applied to the value pointed to by `this` and
the given `operand`. Memory is affected according to the value of
`order`. These operations are atomic read-modify-write
operations [[intro.multithread]].

*Returns:* Atomically, the value pointed to by `this` immediately before
the effects.

*Remarks:* If the result is not a representable value for its
type [[expr.pre]], the result is unspecified, but the operations
otherwise have no undefined behavior. Atomic arithmetic operations on
*`floating-point`* should conform to the
`std::numeric_limits<`*`floating-point`*`>` traits associated with the
floating-point type [[limits.syn]]. The floating-point
environment [[cfenv]] for atomic arithmetic operations on
*`floating-point`* may be different from the calling thread’s
floating-point environment.

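
[*Example*: A non-normative sketch of concurrent accumulation into an
`atomic<double>`; the increment 0.25 is chosen so that every
intermediate sum is exactly representable and the final value is
deterministic:

``` cpp
#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  std::atomic<double> sum{0.0};
  std::vector<std::thread> workers;
  for (int t = 0; t != 4; ++t)
    workers.emplace_back([&sum] {
      for (int i = 0; i != 1000; ++i)
        sum.fetch_add(0.25, std::memory_order::relaxed);  // atomic read-modify-write
    });
  for (auto& w : workers) w.join();
  assert(sum.load() == 1000.0);  // 4 * 1000 * 0.25, exact in binary floating-point
}
```

— *end example*]
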
``` cpp
T operator op=(T operand) volatile noexcept;
T operator op=(T operand) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to:
`return fetch_`*`key`*`(operand) `*`op`*` operand;`

*Remarks:* If the result is not a representable value for its
type [[expr.pre]], the result is unspecified, but the operations
otherwise have no undefined behavior. Atomic arithmetic operations on
*`floating-point`* should conform to the
`std::numeric_limits<`*`floating-point`*`>` traits associated with the
floating-point type [[limits.syn]]. The floating-point
environment [[cfenv]] for atomic arithmetic operations on
*`floating-point`* may be different from the calling thread’s
floating-point environment.

### Partial specialization for pointers <a id="atomics.types.pointer">[[atomics.types.pointer]]</a>

``` cpp
namespace std {
  template<class T> struct atomic<T*> {
    using value_type = T*;
    using difference_type = ptrdiff_t;

    static constexpr bool is_always_lock_free = implementation-defined;  // whether operations are always lock-free
    bool is_lock_free() const volatile noexcept;
    bool is_lock_free() const noexcept;

    constexpr atomic() noexcept;
    constexpr atomic(T*) noexcept;
    atomic(const atomic&) = delete;
    atomic& operator=(const atomic&) = delete;
    atomic& operator=(const atomic&) volatile = delete;

    void store(T*, memory_order = memory_order::seq_cst) volatile noexcept;
    void store(T*, memory_order = memory_order::seq_cst) noexcept;
    T* operator=(T*) volatile noexcept;
    T* operator=(T*) noexcept;
    T* load(memory_order = memory_order::seq_cst) const volatile noexcept;
    T* load(memory_order = memory_order::seq_cst) const noexcept;
    operator T*() const volatile noexcept;
    operator T*() const noexcept;

    T* exchange(T*, memory_order = memory_order::seq_cst) volatile noexcept;
    T* exchange(T*, memory_order = memory_order::seq_cst) noexcept;
    bool compare_exchange_weak(T*&, T*, memory_order, memory_order) volatile noexcept;
    bool compare_exchange_weak(T*&, T*, memory_order, memory_order) noexcept;
    bool compare_exchange_strong(T*&, T*, memory_order, memory_order) volatile noexcept;
    bool compare_exchange_strong(T*&, T*, memory_order, memory_order) noexcept;
    bool compare_exchange_weak(T*&, T*,
                               memory_order = memory_order::seq_cst) volatile noexcept;
    bool compare_exchange_weak(T*&, T*,
                               memory_order = memory_order::seq_cst) noexcept;
    bool compare_exchange_strong(T*&, T*,
                                 memory_order = memory_order::seq_cst) volatile noexcept;
    bool compare_exchange_strong(T*&, T*,
                                 memory_order = memory_order::seq_cst) noexcept;

    T* fetch_add(ptrdiff_t, memory_order = memory_order::seq_cst) volatile noexcept;
    T* fetch_add(ptrdiff_t, memory_order = memory_order::seq_cst) noexcept;
    T* fetch_sub(ptrdiff_t, memory_order = memory_order::seq_cst) volatile noexcept;
    T* fetch_sub(ptrdiff_t, memory_order = memory_order::seq_cst) noexcept;

    T* operator++(int) volatile noexcept;
    T* operator++(int) noexcept;
    T* operator--(int) volatile noexcept;
    T* operator--(int) noexcept;
    T* operator++() volatile noexcept;
    T* operator++() noexcept;
    T* operator--() volatile noexcept;
    T* operator--() noexcept;
    T* operator+=(ptrdiff_t) volatile noexcept;
    T* operator+=(ptrdiff_t) noexcept;
    T* operator-=(ptrdiff_t) volatile noexcept;
    T* operator-=(ptrdiff_t) noexcept;

    void wait(T*, memory_order = memory_order::seq_cst) const volatile noexcept;
    void wait(T*, memory_order = memory_order::seq_cst) const noexcept;
    void notify_one() volatile noexcept;
    void notify_one() noexcept;
    void notify_all() volatile noexcept;
    void notify_all() noexcept;
  };
}
```
 
There is a partial specialization of the `atomic` class template for
pointers. Specializations of this partial specialization are
standard-layout structs. They each have a trivial destructor.

Descriptions are provided below only for members that differ from the
primary template.
 
The following operations perform pointer arithmetic. The key, operator,
and computation correspondence is:

**Table: Atomic pointer computations** <a id="atomic.types.pointer.comp">[atomic.types.pointer.comp]</a>

| Key   | Op  | Computation | Key   | Op  | Computation |
| ----- | --- | ----------- | ----- | --- | ----------- |
| `add` | `+` | addition    | `sub` | `-` | subtraction |
 
``` cpp
T* fetch_key(ptrdiff_t operand, memory_order order = memory_order::seq_cst) volatile noexcept;
T* fetch_key(ptrdiff_t operand, memory_order order = memory_order::seq_cst) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Mandates:* `T` is a complete object type.

[*Note 1*: Pointer arithmetic on `void*` or function pointers is
ill-formed. — *end note*]

*Effects:* Atomically replaces the value pointed to by `this` with the
result of the computation applied to the value pointed to by `this` and
the given `operand`. Memory is affected according to the value of
`order`. These operations are atomic read-modify-write
operations [[intro.multithread]].

*Returns:* Atomically, the value pointed to by `this` immediately before
the effects.

*Remarks:* The result may be an undefined address, but the operations
otherwise have no undefined behavior.

``` cpp
T* operator op=(ptrdiff_t operand) volatile noexcept;
T* operator op=(ptrdiff_t operand) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to:
`return fetch_`*`key`*`(operand) `*`op`*` operand;`

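
[*Example*: A non-normative sketch of the pointer arithmetic above;
note that `fetch_add` advances by elements, not bytes:

``` cpp
#include <atomic>
#include <cassert>

int main() {
  int data[4] = {10, 20, 30, 40};
  std::atomic<int*> p{data};
  int* prev = p.fetch_add(2);    // advances by two elements
  assert(prev == data);          // returns the pointer before the addition
  assert(*p.load() == 30);
  p -= 1;                        // equivalent to fetch_sub(1) - 1
  assert(p.load() == data + 1);
}
```

— *end example*]
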
### Member operators common to integers and pointers to objects <a id="atomics.types.memop">[[atomics.types.memop]]</a>

``` cpp
value_type operator++(int) volatile noexcept;
value_type operator++(int) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to: `return fetch_add(1);`

``` cpp
value_type operator--(int) volatile noexcept;
value_type operator--(int) noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to: `return fetch_sub(1);`

``` cpp
value_type operator++() volatile noexcept;
value_type operator++() noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to: `return fetch_add(1) + 1;`

``` cpp
value_type operator--() volatile noexcept;
value_type operator--() noexcept;
```

*Constraints:* For the `volatile` overload of this function,
`is_always_lock_free` is `true`.

*Effects:* Equivalent to: `return fetch_sub(1) - 1;`
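
[*Example*: A non-normative sketch of the equivalences above; the
postfix forms yield the old value and the prefix forms yield the new
value:

``` cpp
#include <atomic>
#include <cassert>

int main() {
  std::atomic<unsigned> counter{5};
  assert(counter++ == 5);  // postfix: fetch_add(1)
  assert(++counter == 7);  // prefix: fetch_add(1) + 1
  assert(counter-- == 7);  // postfix: fetch_sub(1)
  assert(--counter == 5);  // prefix: fetch_sub(1) - 1
}
```

— *end example*]
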
 
### Partial specializations for smart pointers <a id="util.smartptr.atomic">[[util.smartptr.atomic]]</a>

The library provides partial specializations of the `atomic` template
for shared-ownership smart pointers [[smartptr]]. The behavior of all
operations is as specified in [[atomics.types.generic]], unless
specified otherwise. The template parameter `T` of these partial
specializations may be an incomplete type.

All changes to an atomic smart pointer in this subclause, and all
associated `use_count` increments, are guaranteed to be performed
atomically. Associated `use_count` decrements are sequenced after the
atomic operation, but are not required to be part of it. Any associated
deletion and deallocation are sequenced after the atomic update step and
are not part of the atomic operation.

[*Note 1*: If the atomic operation uses locks, locks acquired by the
implementation will be held when any `use_count` adjustments are
performed, and will not be held when any destruction or deallocation
resulting from this is performed. — *end note*]

[*Example 1*:

``` cpp
template<typename T> class atomic_list {
  struct node {
    T t;
    shared_ptr<node> next;
  };
  atomic<shared_ptr<node>> head;

public:
  auto find(T t) const {
    auto p = head.load();
    while (p && p->t != t)
      p = p->next;

    return shared_ptr<node>(move(p));
  }

  void push_front(T t) {
    auto p = make_shared<node>();
    p->t = t;
    p->next = head;
    while (!head.compare_exchange_weak(p->next, p)) {}
  }
};
```

— *end example*]

#### Partial specialization for `shared_ptr` <a id="util.smartptr.atomic.shared">[[util.smartptr.atomic.shared]]</a>

``` cpp
namespace std {
  template<class T> struct atomic<shared_ptr<T>> {
    using value_type = shared_ptr<T>;

    static constexpr bool is_always_lock_free = implementation-defined;  // whether operations are always lock-free
    bool is_lock_free() const noexcept;

    constexpr atomic() noexcept;
    atomic(shared_ptr<T> desired) noexcept;
    atomic(const atomic&) = delete;
    void operator=(const atomic&) = delete;

    shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
    operator shared_ptr<T>() const noexcept;
    void store(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
    void operator=(shared_ptr<T> desired) noexcept;

    shared_ptr<T> exchange(shared_ptr<T> desired,
                           memory_order order = memory_order::seq_cst) noexcept;
    bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
                               memory_order success, memory_order failure) noexcept;
    bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
                                 memory_order success, memory_order failure) noexcept;
    bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
                               memory_order order = memory_order::seq_cst) noexcept;
    bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
                                 memory_order order = memory_order::seq_cst) noexcept;

    void wait(shared_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
    void notify_one() noexcept;
    void notify_all() noexcept;

  private:
    shared_ptr<T> p;  // exposition only
  };
}
```

``` cpp
constexpr atomic() noexcept;
```

*Effects:* Initializes `p{}`.

``` cpp
atomic(shared_ptr<T> desired) noexcept;
```

*Effects:* Initializes the object with the value `desired`.
Initialization is not an atomic operation [[intro.multithread]].

[*Note 1*: It is possible to have an access to an atomic object `A`
race with its construction, for example, by communicating the address of
the just-constructed object `A` to another thread via
`memory_order::relaxed` operations on a suitable atomic pointer
variable, and then immediately accessing `A` in the receiving thread.
This results in undefined behavior. — *end note*]

``` cpp
void store(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
```

*Preconditions:* `order` is neither `memory_order::consume`,
`memory_order::acquire`, nor `memory_order::acq_rel`.

*Effects:* Atomically replaces the value pointed to by `this` with the
value of `desired` as if by `p.swap(desired)`. Memory is affected
according to the value of `order`.

``` cpp
void operator=(shared_ptr<T> desired) noexcept;
```

*Effects:* Equivalent to `store(desired)`.

``` cpp
shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
```

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Memory is affected according to the value of `order`.

*Returns:* Atomically returns `p`.

``` cpp
operator shared_ptr<T>() const noexcept;
```

*Effects:* Equivalent to: `return load();`

``` cpp
shared_ptr<T> exchange(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Atomically replaces `p` with `desired` as if by
`p.swap(desired)`. Memory is affected according to the value of `order`.
This is an atomic read-modify-write operation [[intro.races]].

*Returns:* Atomically returns the value of `p` immediately before the
effects.

``` cpp
bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
                           memory_order success, memory_order failure) noexcept;
bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
                             memory_order success, memory_order failure) noexcept;
```

*Preconditions:* `failure` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* If `p` is equivalent to `expected`, assigns `desired` to `p`
and has synchronization semantics corresponding to the value of
`success`, otherwise assigns `p` to `expected` and has synchronization
semantics corresponding to the value of `failure`.

*Returns:* `true` if `p` was equivalent to `expected`, `false`
otherwise.

*Remarks:* Two `shared_ptr` objects are equivalent if they store the
same pointer value and either share ownership or are both empty. The
weak form may fail spuriously. See [[atomics.types.operations]].

If the operation returns `true`, `expected` is not accessed after the
atomic update and the operation is an atomic read-modify-write
operation [[intro.multithread]] on the memory pointed to by `this`.
Otherwise, the operation is an atomic load operation on that memory, and
`expected` is updated with the existing value read from the atomic
object in the attempted atomic update. The `use_count` update
corresponding to the write to `expected` is part of the atomic
operation. The write to `expected` itself is not required to be part of
the atomic operation.

``` cpp
bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
                           memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Equivalent to:

``` cpp
return compare_exchange_weak(expected, desired, order, fail_order);
```

where `fail_order` is the same as `order` except that a value of
`memory_order::acq_rel` shall be replaced by the value
`memory_order::acquire` and a value of `memory_order::release` shall be
replaced by the value `memory_order::relaxed`.

``` cpp
bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
                             memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Equivalent to:

``` cpp
return compare_exchange_strong(expected, desired, order, fail_order);
```

where `fail_order` is the same as `order` except that a value of
`memory_order::acq_rel` shall be replaced by the value
`memory_order::acquire` and a value of `memory_order::release` shall be
replaced by the value `memory_order::relaxed`.

``` cpp
void wait(shared_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
```

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Repeatedly performs the following steps, in order:

- Evaluates `load(order)` and compares it to `old`.
- If the two are not equivalent, returns.
- Blocks until it is unblocked by an atomic notifying operation or is
  unblocked spuriously.

*Remarks:* Two `shared_ptr` objects are equivalent if they store the
same pointer and either share ownership or are both empty. This function
is an atomic waiting operation [[atomics.wait]].

``` cpp
void notify_one() noexcept;
```

*Effects:* Unblocks the execution of at least one atomic waiting
operation that is eligible to be unblocked [[atomics.wait]] by this
call, if any such atomic waiting operations exist.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

``` cpp
void notify_all() noexcept;
```

*Effects:* Unblocks the execution of all atomic waiting operations that
are eligible to be unblocked [[atomics.wait]] by this call.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

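
[*Example*: A non-normative sketch of publishing a value through
`atomic<shared_ptr<T>>` with the waiting and notifying operations; the
consumer blocks while the stored value is equivalent to the empty
`shared_ptr`:

``` cpp
#include <atomic>
#include <cassert>
#include <memory>
#include <thread>

int main() {
  std::atomic<std::shared_ptr<int>> slot;  // default constructor: empty shared_ptr
  std::thread consumer([&slot] {
    slot.wait(nullptr);                    // blocks while slot holds an empty shared_ptr
    assert(*slot.load() == 42);
  });
  slot.store(std::make_shared<int>(42));
  slot.notify_one();                       // unblocks the waiting consumer
  consumer.join();
}
```

— *end example*]
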
#### Partial specialization for `weak_ptr` <a id="util.smartptr.atomic.weak">[[util.smartptr.atomic.weak]]</a>

``` cpp
namespace std {
  template<class T> struct atomic<weak_ptr<T>> {
    using value_type = weak_ptr<T>;

    static constexpr bool is_always_lock_free = implementation-defined;  // whether operations are always lock-free
    bool is_lock_free() const noexcept;

    constexpr atomic() noexcept;
    atomic(weak_ptr<T> desired) noexcept;
    atomic(const atomic&) = delete;
    void operator=(const atomic&) = delete;

    weak_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
    operator weak_ptr<T>() const noexcept;
    void store(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
    void operator=(weak_ptr<T> desired) noexcept;

    weak_ptr<T> exchange(weak_ptr<T> desired,
                         memory_order order = memory_order::seq_cst) noexcept;
    bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
                               memory_order success, memory_order failure) noexcept;
    bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
                                 memory_order success, memory_order failure) noexcept;
    bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
                               memory_order order = memory_order::seq_cst) noexcept;
    bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
                                 memory_order order = memory_order::seq_cst) noexcept;

    void wait(weak_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
    void notify_one() noexcept;
    void notify_all() noexcept;

  private:
    weak_ptr<T> p;  // exposition only
  };
}
```

``` cpp
constexpr atomic() noexcept;
```

*Effects:* Initializes `p{}`.

``` cpp
atomic(weak_ptr<T> desired) noexcept;
```

*Effects:* Initializes the object with the value `desired`.
Initialization is not an atomic operation [[intro.multithread]].

[*Note 1*: It is possible to have an access to an atomic object `A`
race with its construction, for example, by communicating the address of
the just-constructed object `A` to another thread via
`memory_order::relaxed` operations on a suitable atomic pointer
variable, and then immediately accessing `A` in the receiving thread.
This results in undefined behavior. — *end note*]

``` cpp
void store(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
```

*Preconditions:* `order` is neither `memory_order::consume`,
`memory_order::acquire`, nor `memory_order::acq_rel`.

*Effects:* Atomically replaces the value pointed to by `this` with the
value of `desired` as if by `p.swap(desired)`. Memory is affected
according to the value of `order`.

``` cpp
void operator=(weak_ptr<T> desired) noexcept;
```

*Effects:* Equivalent to `store(desired)`.

``` cpp
weak_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
```

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Memory is affected according to the value of `order`.

*Returns:* Atomically returns `p`.

``` cpp
operator weak_ptr<T>() const noexcept;
```

*Effects:* Equivalent to: `return load();`

``` cpp
weak_ptr<T> exchange(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Atomically replaces `p` with `desired` as if by
`p.swap(desired)`. Memory is affected according to the value of `order`.
This is an atomic read-modify-write operation [[intro.races]].

*Returns:* Atomically returns the value of `p` immediately before the
effects.

``` cpp
bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
                           memory_order success, memory_order failure) noexcept;
bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
                             memory_order success, memory_order failure) noexcept;
```

*Preconditions:* `failure` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* If `p` is equivalent to `expected`, assigns `desired` to `p`
and has synchronization semantics corresponding to the value of
`success`, otherwise assigns `p` to `expected` and has synchronization
semantics corresponding to the value of `failure`.

*Returns:* `true` if `p` was equivalent to `expected`, `false`
otherwise.

*Remarks:* Two `weak_ptr` objects are equivalent if they store the same
pointer value and either share ownership or are both empty. The weak
form may fail spuriously. See [[atomics.types.operations]].

If the operation returns `true`, `expected` is not accessed after the
atomic update and the operation is an atomic read-modify-write
operation [[intro.multithread]] on the memory pointed to by `this`.
Otherwise, the operation is an atomic load operation on that memory, and
`expected` is updated with the existing value read from the atomic
object in the attempted atomic update. The `use_count` update
corresponding to the write to `expected` is part of the atomic
operation. The write to `expected` itself is not required to be part of
the atomic operation.

``` cpp
bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
                           memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Equivalent to:

``` cpp
return compare_exchange_weak(expected, desired, order, fail_order);
```

where `fail_order` is the same as `order` except that a value of
`memory_order::acq_rel` shall be replaced by the value
`memory_order::acquire` and a value of `memory_order::release` shall be
replaced by the value `memory_order::relaxed`.

``` cpp
bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
                             memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Equivalent to:

``` cpp
return compare_exchange_strong(expected, desired, order, fail_order);
```

where `fail_order` is the same as `order` except that a value of
`memory_order::acq_rel` shall be replaced by the value
`memory_order::acquire` and a value of `memory_order::release` shall be
replaced by the value `memory_order::relaxed`.

``` cpp
void wait(weak_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
```

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Repeatedly performs the following steps, in order:

- Evaluates `load(order)` and compares it to `old`.
- If the two are not equivalent, returns.
- Blocks until it is unblocked by an atomic notifying operation or is
  unblocked spuriously.

*Remarks:* Two `weak_ptr` objects are equivalent if they store the same
pointer and either share ownership or are both empty. This function is
an atomic waiting operation [[atomics.wait]].

``` cpp
void notify_one() noexcept;
```

*Effects:* Unblocks the execution of at least one atomic waiting
operation that is eligible to be unblocked [[atomics.wait]] by this
call, if any such atomic waiting operations exist.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

``` cpp
void notify_all() noexcept;
```

*Effects:* Unblocks the execution of all atomic waiting operations that
are eligible to be unblocked [[atomics.wait]] by this call.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

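
[*Example*: A non-normative sketch of a lazily populated cache built on
`atomic<weak_ptr<T>>`; `cache`, `make_value`, and `get` are illustrative
names, and the benign race in which two threads both rebuild the value
is accepted for brevity:

``` cpp
#include <atomic>
#include <memory>

std::atomic<std::weak_ptr<int>> cache;

std::shared_ptr<int> make_value() { return std::make_shared<int>(42); }

std::shared_ptr<int> get() {
  if (auto sp = cache.load().lock())  // atomically load the weak_ptr, then try to lock it
    return sp;                        // the cached value is still alive
  auto sp = make_value();
  cache.store(sp);                    // atomic store; shared_ptr converts to weak_ptr
  return sp;
}

int main() {
  auto a = get();
  auto b = get();                     // served from the cache while a keeps the value alive
  return a == b ? 0 : 1;
}
```

— *end example*]
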
## Non-member functions <a id="atomics.nonmembers">[[atomics.nonmembers]]</a>

A non-member function template whose name matches the pattern `atomic_f`
or the pattern `atomic_f_explicit` invokes the member function `f`, with
the value of the first parameter as the object expression and the values
of the remaining parameters (if any) as the remaining arguments to the member
function call, in order. An argument for a parameter of type
`atomic<T>::value_type*` is dereferenced when passed to the member
function call. If no such member function exists, the program is
ill-formed.

[*Note 1*: The non-member functions enable programmers to write code
that can be compiled as either C or C++, for example in a shared header
file. — *end note*]
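
[*Example*: A non-normative sketch of the correspondence between the
non-member functions and the member functions they invoke:

``` cpp
#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> a{0};
  std::atomic_store(&a, 7);  // invokes a.store(7)
  int expected = 7;
  bool ok = std::atomic_compare_exchange_strong(&a, &expected, 9);
  // the atomic<int>::value_type* argument is dereferenced when passed on
  assert(ok && std::atomic_load(&a) == 9);
}
```

— *end example*]
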
 
## Flag type and operations <a id="atomics.flag">[[atomics.flag]]</a>

``` cpp
namespace std {
  struct atomic_flag {
    constexpr atomic_flag() noexcept;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    bool test(memory_order = memory_order::seq_cst) const volatile noexcept;
    bool test(memory_order = memory_order::seq_cst) const noexcept;
    bool test_and_set(memory_order = memory_order::seq_cst) volatile noexcept;
    bool test_and_set(memory_order = memory_order::seq_cst) noexcept;
    void clear(memory_order = memory_order::seq_cst) volatile noexcept;
    void clear(memory_order = memory_order::seq_cst) noexcept;

    void wait(bool, memory_order = memory_order::seq_cst) const volatile noexcept;
    void wait(bool, memory_order = memory_order::seq_cst) const noexcept;
    void notify_one() volatile noexcept;
    void notify_one() noexcept;
    void notify_all() volatile noexcept;
    void notify_all() noexcept;
  };
}
```
 
The `atomic_flag` type provides the classic test-and-set functionality.
It has two states, set and clear.

Operations on an object of type `atomic_flag` shall be lock-free.

[*Note 1*: Hence the operations should also be
address-free. — *end note*]

The `atomic_flag` type is a standard-layout struct. It has a trivial
destructor.

``` cpp
constexpr atomic_flag::atomic_flag() noexcept;
```

*Effects:* Initializes `*this` to the clear state.
 
``` cpp
bool atomic_flag_test(const volatile atomic_flag* object) noexcept;
bool atomic_flag_test(const atomic_flag* object) noexcept;
bool atomic_flag_test_explicit(const volatile atomic_flag* object,
                               memory_order order) noexcept;
bool atomic_flag_test_explicit(const atomic_flag* object,
                               memory_order order) noexcept;
bool atomic_flag::test(memory_order order = memory_order::seq_cst) const volatile noexcept;
bool atomic_flag::test(memory_order order = memory_order::seq_cst) const noexcept;
```

For `atomic_flag_test`, let `order` be `memory_order::seq_cst`.

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Memory is affected according to the value of `order`.

*Returns:* Atomically returns the value pointed to by `object` or
`this`.
 
``` cpp
bool atomic_flag_test_and_set(volatile atomic_flag* object) noexcept;
bool atomic_flag_test_and_set(atomic_flag* object) noexcept;
bool atomic_flag_test_and_set_explicit(volatile atomic_flag* object, memory_order order) noexcept;
bool atomic_flag_test_and_set_explicit(atomic_flag* object, memory_order order) noexcept;
bool atomic_flag::test_and_set(memory_order order = memory_order::seq_cst) volatile noexcept;
bool atomic_flag::test_and_set(memory_order order = memory_order::seq_cst) noexcept;
```

*Effects:* Atomically sets the value pointed to by `object` or by `this`
to `true`. Memory is affected according to the value of `order`. These
operations are atomic read-modify-write
operations [[intro.multithread]].

*Returns:* Atomically, the value of the object immediately before the
effects.
 
``` cpp
void atomic_flag_clear(volatile atomic_flag* object) noexcept;
void atomic_flag_clear(atomic_flag* object) noexcept;
void atomic_flag_clear_explicit(volatile atomic_flag* object, memory_order order) noexcept;
void atomic_flag_clear_explicit(atomic_flag* object, memory_order order) noexcept;
void atomic_flag::clear(memory_order order = memory_order::seq_cst) volatile noexcept;
void atomic_flag::clear(memory_order order = memory_order::seq_cst) noexcept;
```

*Preconditions:* The `order` argument is neither
`memory_order::consume`, `memory_order::acquire`, nor
`memory_order::acq_rel`.

*Effects:* Atomically sets the value pointed to by `object` or by `this`
to `false`. Memory is affected according to the value of `order`.
 
``` cpp
void atomic_flag_wait(const volatile atomic_flag* object, bool old) noexcept;
void atomic_flag_wait(const atomic_flag* object, bool old) noexcept;
void atomic_flag_wait_explicit(const volatile atomic_flag* object,
                               bool old, memory_order order) noexcept;
void atomic_flag_wait_explicit(const atomic_flag* object,
                               bool old, memory_order order) noexcept;
void atomic_flag::wait(bool old, memory_order order =
                         memory_order::seq_cst) const volatile noexcept;
void atomic_flag::wait(bool old, memory_order order =
                         memory_order::seq_cst) const noexcept;
```

For `atomic_flag_wait`, let `order` be `memory_order::seq_cst`. Let
`flag` be `object` for the non-member functions and `this` for the
member functions.

*Preconditions:* `order` is neither `memory_order::release` nor
`memory_order::acq_rel`.

*Effects:* Repeatedly performs the following steps, in order:

- Evaluates `flag->test(order) != old`.
- If the result of that evaluation is `true`, returns.
- Blocks until it is unblocked by an atomic notifying operation or is
  unblocked spuriously.

*Remarks:* This function is an atomic waiting
operation [[atomics.wait]].

``` cpp
void atomic_flag_notify_one(volatile atomic_flag* object) noexcept;
void atomic_flag_notify_one(atomic_flag* object) noexcept;
void atomic_flag::notify_one() volatile noexcept;
void atomic_flag::notify_one() noexcept;
```

*Effects:* Unblocks the execution of at least one atomic waiting
operation that is eligible to be unblocked [[atomics.wait]] by this
call, if any such atomic waiting operations exist.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

``` cpp
void atomic_flag_notify_all(volatile atomic_flag* object) noexcept;
void atomic_flag_notify_all(atomic_flag* object) noexcept;
void atomic_flag::notify_all() volatile noexcept;
void atomic_flag::notify_all() noexcept;
```

*Effects:* Unblocks the execution of all atomic waiting operations that
are eligible to be unblocked [[atomics.wait]] by this call.

*Remarks:* This function is an atomic notifying
operation [[atomics.wait]].

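
[*Example*: A non-normative sketch of a simple lock built from
`atomic_flag`; `guard` and `increment` are illustrative names. The
waiting and notifying operations avoid busy-waiting while the flag
remains set:

``` cpp
#include <atomic>
#include <functional>
#include <thread>

std::atomic_flag guard;  // the default constructor initializes it to the clear state

void increment(int& value) {
  while (guard.test_and_set(std::memory_order::acquire))  // try to take the lock
    guard.wait(true, std::memory_order::relaxed);         // block while the flag stays set
  ++value;                                                // critical section
  guard.clear(std::memory_order::release);
  guard.notify_one();                                     // wake one blocked waiter, if any
}

int main() {
  int value = 0;
  std::thread t1(increment, std::ref(value));
  std::thread t2(increment, std::ref(value));
  t1.join();
  t2.join();
  return value == 2 ? 0 : 1;
}
```

— *end example*]
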
## Fences <a id="atomics.fences">[[atomics.fences]]</a>

This subclause introduces synchronization primitives called *fences*.
Fences can have acquire semantics, release semantics, or both. A fence
with acquire semantics is called an *acquire fence*. A fence with
release semantics is called a *release fence*.

A release fence A synchronizes with an acquire fence B if there exist
atomic operations X and Y, both operating on some atomic object M, such
that A is sequenced before X, X modifies M, Y is sequenced before B, and
Y reads the value written by X or a value written by any side effect in
the hypothetical release sequence X would head if it were a release
operation.

A release fence A synchronizes with an atomic operation B that performs
an acquire operation on an atomic object M if there exists an atomic
operation X such that A is sequenced before X, X modifies M, and B reads
the value written by X or a value written by any side effect in the
hypothetical release sequence X would head if it were a release
operation.

An atomic operation A that is a release operation on an atomic object M
synchronizes with an acquire fence B if there exists some atomic
operation X on M such that X is sequenced before B and reads the value
written by A or a value written by any side effect in the release
sequence headed by A.
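
[*Example*: A non-normative sketch of the first rule above: the release
fence A synchronizes with the acquire fence B because the relaxed store
X modifies `ready` and the relaxed load Y reads the value written by X,
so the non-atomic access to `payload` does not race:

``` cpp
#include <atomic>
#include <cassert>
#include <thread>

int payload = 0;                  // non-atomic data
std::atomic<bool> ready{false};

void producer() {
  payload = 42;
  std::atomic_thread_fence(std::memory_order::release);  // release fence A
  ready.store(true, std::memory_order::relaxed);         // X: modifies ready
}

void consumer() {
  while (!ready.load(std::memory_order::relaxed)) {}     // Y: reads the value written by X
  std::atomic_thread_fence(std::memory_order::acquire);  // acquire fence B
  assert(payload == 42);                                 // A synchronizes with B
}

int main() {
  std::thread t1(producer), t2(consumer);
  t1.join();
  t2.join();
}
```

— *end example*]
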
 
``` cpp
extern "C" void atomic_thread_fence(memory_order order) noexcept;
```

*Effects:* Depending on the value of `order`, this operation:

- has no effects, if `order == memory_order::relaxed`;
- is an acquire fence, if `order == memory_order::acquire` or
  `order == memory_order::consume`;
- is a release fence, if `order == memory_order::release`;
- is both an acquire fence and a release fence, if
  `order == memory_order::acq_rel`;
- is a sequentially consistent acquire and release fence, if
  `order == memory_order::seq_cst`.
 
``` cpp
extern "C" void atomic_signal_fence(memory_order order) noexcept;
```

*Effects:* Equivalent to `atomic_thread_fence(order)`, except that the
resulting ordering constraints are established only between a thread and
a signal handler executed in the same thread.

[*Note 1*: `atomic_signal_fence` can be used to specify the order in
which actions performed by the thread become visible to the signal
handler. Compiler optimizations and reorderings of loads and stores are
inhibited in the same way as with `atomic_thread_fence`, but the
hardware fence instructions that `atomic_thread_fence` would have
inserted are not emitted. — *end note*]
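
[*Example*: A non-normative sketch of the classic signal-fence pattern;
`payload`, `ready`, and `observed` are illustrative names, and
`std::raise` stands in for a signal that would normally arrive
asynchronously in the same thread:

``` cpp
#include <atomic>
#include <csignal>

int payload = 0;                          // shared with the handler in the same thread
std::atomic<bool> ready{false};
volatile std::sig_atomic_t observed = 0;

extern "C" void handler(int) {
  if (ready.load(std::memory_order::relaxed)) {
    std::atomic_signal_fence(std::memory_order::acquire);  // orders the load before the read below
    observed = payload;
  }
}

int main() {
  std::signal(SIGINT, handler);
  payload = 42;
  std::atomic_signal_fence(std::memory_order::release);    // orders the write before the store below
  ready.store(true, std::memory_order::relaxed);
  std::raise(SIGINT);
  return observed == 42 ? 0 : 1;
}
```

— *end example*]
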
 
<!-- Link reference definitions -->
[atomic.types.int.comp]: #atomic.types.int.comp
[atomic.types.pointer.comp]: #atomic.types.pointer.comp
[atomics]: #atomics
[atomics.alias]: #atomics.alias
[atomics.fences]: #atomics.fences
[atomics.flag]: #atomics.flag
[atomics.general]: #atomics.general
[atomics.lockfree]: #atomics.lockfree
[atomics.nonmembers]: #atomics.nonmembers
[atomics.order]: #atomics.order
[atomics.ref.float]: #atomics.ref.float
[atomics.ref.generic]: #atomics.ref.generic
[atomics.ref.int]: #atomics.ref.int
[atomics.ref.memop]: #atomics.ref.memop
[atomics.ref.ops]: #atomics.ref.ops
[atomics.ref.pointer]: #atomics.ref.pointer
[atomics.summary]: #atomics.summary
[atomics.syn]: #atomics.syn
[atomics.types.float]: #atomics.types.float
[atomics.types.generic]: #atomics.types.generic
[atomics.types.int]: #atomics.types.int
[atomics.types.memop]: #atomics.types.memop
[atomics.types.operations]: #atomics.types.operations
[atomics.types.pointer]: #atomics.types.pointer
[atomics.wait]: #atomics.wait
[basic.align]: basic.md#basic.align
[basic.fundamental]: basic.md#basic.fundamental
[basic.life]: basic.md#basic.life
[basic.types]: basic.md#basic.types
[cfenv]: numerics.md#cfenv
[compliance]: library.md#compliance
[expr.pre]: expr.md#expr.pre
[intro.multithread]: basic.md#intro.multithread
[intro.progress]: basic.md#intro.progress
[intro.races]: basic.md#intro.races
[limits.syn]: support.md#limits.syn
[smartptr]: utilities.md#smartptr
[util.smartptr.atomic]: #util.smartptr.atomic
[util.smartptr.atomic.shared]: #util.smartptr.atomic.shared
[util.smartptr.atomic.weak]: #util.smartptr.atomic.weak