From Jason Turner

[atomics]

Diff to HTML by rtfpessoa

Files changed (1) hide show
  1. tmp/tmp2exsiy15/{from.md → to.md} +637 -534
tmp/tmp2exsiy15/{from.md → to.md} RENAMED
@@ -39,142 +39,194 @@ namespace std {
39
  #define ATOMIC_INT_LOCK_FREE unspecified
40
  #define ATOMIC_LONG_LOCK_FREE unspecified
41
  #define ATOMIC_LLONG_LOCK_FREE unspecified
42
  #define ATOMIC_POINTER_LOCK_FREE unspecified
43
 
44
- // [atomics.types.generic], generic types
45
  template<class T> struct atomic;
46
- template<> struct atomic<integral>;
47
  template<class T> struct atomic<T*>;
48
 
49
- // [atomics.types.operations.general], general operations on atomic types
50
- // In the following declarations, atomic-type is either
51
- // atomic<T> or a named base class for T from
52
- // Table~[tab:atomics.integral] or inferred from Table~[tab:atomics.typedefs] or from bool.
53
- // If it is atomic<T>, then the declaration is a template
54
- // declaration prefixed with template <class T>.
55
- bool atomic_is_lock_free(const volatile atomic-type*) noexcept;
56
- bool atomic_is_lock_free(const atomic-type*) noexcept;
57
- void atomic_init(volatile atomic-type*, T) noexcept;
58
- void atomic_init(atomic-type*, T) noexcept;
59
- void atomic_store(volatile atomic-type*, T) noexcept;
60
- void atomic_store(atomic-type*, T) noexcept;
61
- void atomic_store_explicit(volatile atomic-type*, T, memory_order) noexcept;
62
- void atomic_store_explicit(atomic-type*, T, memory_order) noexcept;
63
- T atomic_load(const volatile atomic-type*) noexcept;
64
- T atomic_load(const atomic-type*) noexcept;
65
- T atomic_load_explicit(const volatile atomic-type*, memory_order) noexcept;
66
- T atomic_load_explicit(const atomic-type*, memory_order) noexcept;
67
- T atomic_exchange(volatile atomic-type*, T) noexcept;
68
- T atomic_exchange(atomic-type*, T) noexcept;
69
- T atomic_exchange_explicit(volatile atomic-type*, T, memory_order) noexcept;
70
- T atomic_exchange_explicit(atomic-type*, T, memory_order) noexcept;
71
- bool atomic_compare_exchange_weak(volatile atomic-type*, T*, T) noexcept;
72
- bool atomic_compare_exchange_weak(atomic-type*, T*, T) noexcept;
73
- bool atomic_compare_exchange_strong(volatile atomic-type*, T*, T) noexcept;
74
- bool atomic_compare_exchange_strong(atomic-type*, T*, T) noexcept;
75
- bool atomic_compare_exchange_weak_explicit(volatile atomic-type*, T*, T,
76
- memory_order, memory_order) noexcept;
77
- bool atomic_compare_exchange_weak_explicit(atomic-type*, T*, T,
78
- memory_order, memory_order) noexcept;
79
- bool atomic_compare_exchange_strong_explicit(volatile atomic-type*, T*, T,
80
- memory_order, memory_order) noexcept;
81
- bool atomic_compare_exchange_strong_explicit(atomic-type*, T*, T,
82
- memory_order, memory_order) noexcept;
83
-
84
- // [atomics.types.operations.templ], templated operations on atomic types
85
  template<class T>
86
- T atomic_fetch_add(volatile atomic<T>*, T) noexcept;
87
  template<class T>
88
- T atomic_fetch_add(atomic<T>*, T) noexcept;
89
  template<class T>
90
- T atomic_fetch_add_explicit(volatile atomic<T>*, T, memory_order) noexcept;
91
  template<class T>
92
- T atomic_fetch_add_explicit(atomic<T>*, T, memory_order) noexcept;
93
  template<class T>
94
- T atomic_fetch_sub(volatile atomic<T>*, T) noexcept;
95
  template<class T>
96
- T atomic_fetch_sub(atomic<T>*, T) noexcept;
97
  template<class T>
98
- T atomic_fetch_sub_explicit(volatile atomic<T>*, T, memory_order) noexcept;
 
99
  template<class T>
100
- T atomic_fetch_sub_explicit(atomic<T>*, T, memory_order) noexcept;
 
101
  template<class T>
102
- T atomic_fetch_and(volatile atomic<T>*, T) noexcept;
103
  template<class T>
104
- T atomic_fetch_and(atomic<T>*, T) noexcept;
105
  template<class T>
106
- T atomic_fetch_and_explicit(volatile atomic<T>*, T, memory_order) noexcept;
107
  template<class T>
108
- T atomic_fetch_and_explicit(atomic<T>*, T, memory_order) noexcept;
109
  template<class T>
110
- T atomic_fetch_or(volatile atomic<T>*, T) noexcept;
111
  template<class T>
112
- T atomic_fetch_or(atomic<T>*, T) noexcept;
113
  template<class T>
114
- T atomic_fetch_or_explicit(volatile atomic<T>*, T, memory_order) noexcept;
 
115
  template<class T>
116
- T atomic_fetch_or_explicit(atomic<T>*, T, memory_order) noexcept;
 
117
  template<class T>
118
- T atomic_fetch_xor(volatile atomic<T>*, T) noexcept;
 
 
119
  template<class T>
120
- T atomic_fetch_xor(atomic<T>*, T) noexcept;
 
 
121
  template<class T>
122
- T atomic_fetch_xor_explicit(volatile atomic<T>*, T, memory_order) noexcept;
 
 
123
  template<class T>
124
- T atomic_fetch_xor_explicit(atomic<T>*, T, memory_order) noexcept;
125
-
126
- // [atomics.types.operations.arith], arithmetic operations on atomic types
127
- // In the following declarations, atomic-integral is either
128
- // atomic<T> or a named base class for T from
129
- // Table~[tab:atomics.integral] or inferred from Table~[tab:atomics.typedefs].
130
- // If it is atomic<T>, then the declaration is a template
131
- // specialization declaration prefixed with template <>.
132
-
133
- integral atomic_fetch_add(volatile atomic-integral*, integral) noexcept;
134
- integral atomic_fetch_add(atomic-integral*, integral) noexcept;
135
- integral atomic_fetch_add_explicit(volatile atomic-integral*, integral, memory_order) noexcept;
136
- integral atomic_fetch_add_explicit(atomic-integral*, integral, memory_order) noexcept;
137
- integral atomic_fetch_sub(volatile atomic-integral*, integral) noexcept;
138
- integral atomic_fetch_sub(atomic-integral*, integral) noexcept;
139
- integral atomic_fetch_sub_explicit(volatile atomic-integral*, integral, memory_order) noexcept;
140
- integral atomic_fetch_sub_explicit(atomic-integral*, integral, memory_order) noexcept;
141
- integral atomic_fetch_and(volatile atomic-integral*, integral) noexcept;
142
- integral atomic_fetch_and(atomic-integral*, integral) noexcept;
143
- integral atomic_fetch_and_explicit(volatile atomic-integral*, integral, memory_order) noexcept;
144
- integral atomic_fetch_and_explicit(atomic-integral*, integral, memory_order) noexcept;
145
- integral atomic_fetch_or(volatile atomic-integral*, integral) noexcept;
146
- integral atomic_fetch_or(atomic-integral*, integral) noexcept;
147
- integral atomic_fetch_or_explicit(volatile atomic-integral*, integral, memory_order) noexcept;
148
- integral atomic_fetch_or_explicit(atomic-integral*, integral, memory_order) noexcept;
149
- integral atomic_fetch_xor(volatile atomic-integral*, integral) noexcept;
150
- integral atomic_fetch_xor(atomic-integral*, integral) noexcept;
151
- integral atomic_fetch_xor_explicit(volatile atomic-integral*, integral, memory_order) noexcept;
152
- integral atomic_fetch_xor_explicit(atomic-integral*, integral, memory_order) noexcept;
153
-
154
- // [atomics.types.operations.pointer], partial specializations for pointers
155
 
156
  template <class T>
157
- T* atomic_fetch_add(volatile atomic<T*>*, ptrdiff_t) noexcept;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
  template <class T>
159
- T* atomic_fetch_add(atomic<T*>*, ptrdiff_t) noexcept;
160
  template <class T>
161
- T* atomic_fetch_add_explicit(volatile atomic<T*>*, ptrdiff_t, memory_order) noexcept;
 
162
  template <class T>
163
- T* atomic_fetch_add_explicit(atomic<T*>*, ptrdiff_t, memory_order) noexcept;
 
164
  template <class T>
165
- T* atomic_fetch_sub(volatile atomic<T*>*, ptrdiff_t) noexcept;
166
  template <class T>
167
- T* atomic_fetch_sub(atomic<T*>*, ptrdiff_t) noexcept;
168
  template <class T>
169
- T* atomic_fetch_sub_explicit(volatile atomic<T*>*, ptrdiff_t, memory_order) noexcept;
 
170
  template <class T>
171
- T* atomic_fetch_sub_explicit(atomic<T*>*, ptrdiff_t, memory_order) noexcept;
 
172
 
173
- // [atomics.types.operations.req], initialization
174
  #define ATOMIC_VAR_INIT(value) see below
175
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
  // [atomics.flag], flag type and operations
177
  struct atomic_flag;
178
  bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
179
  bool atomic_flag_test_and_set(atomic_flag*) noexcept;
180
  bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
@@ -189,18 +241,24 @@ namespace std {
189
  extern "C" void atomic_thread_fence(memory_order) noexcept;
190
  extern "C" void atomic_signal_fence(memory_order) noexcept;
191
  }
192
  ```
193
 
 
 
 
 
 
 
194
  ## Order and consistency <a id="atomics.order">[[atomics.order]]</a>
195
 
196
  ``` cpp
197
  namespace std {
198
- typedef enum memory_order {
199
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
200
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
201
- } memory_order;
202
  }
203
  ```
204
 
205
  The enumeration `memory_order` specifies the detailed regular
206
  (non-atomic) memory synchronization order as defined in
@@ -210,19 +268,24 @@ enumerated values and their meanings are as follows:
210
  - `memory_order_relaxed`: no operation orders memory.
211
  - `memory_order_release`, `memory_order_acq_rel`, and
212
  `memory_order_seq_cst`: a store operation performs a release operation
213
  on the affected memory location.
214
  - `memory_order_consume`: a load operation performs a consume operation
215
- on the affected memory location.
 
 
 
 
216
  - `memory_order_acquire`, `memory_order_acq_rel`, and
217
  `memory_order_seq_cst`: a load operation performs an acquire operation
218
  on the affected memory location.
219
 
220
- Atomic operations specifying `memory_order_relaxed` are relaxed with
221
- respect to memory ordering. Implementations must still guarantee that
222
- any given atomic access to a particular atomic object be indivisible
223
- with respect to all other atomic accesses to that object.
 
224
 
225
  An atomic operation *A* that performs a release operation on an atomic
226
  object *M* synchronizes with an atomic operation *B* that performs an
227
  acquire operation on *M* and takes its value from any side effect in the
228
  release sequence headed by *A*.
@@ -238,14 +301,14 @@ of the following values:
238
  - if *A* exists, the result of some modification of *M* that is not
239
  `memory_order_seq_cst` and that does not happen before *A*, or
240
  - if *A* does not exist, the result of some modification of *M* that is
241
  not `memory_order_seq_cst`.
242
 
243
- Although it is not explicitly required that *S* include locks, it can
244
- always be extended to an order that does include lock and unlock
245
- operations, since the ordering between those is already included in the
246
- “happens before” ordering.
247
 
248
  For an atomic operation *B* that reads the value of an atomic object
249
  *M*, if there is a `memory_order_seq_cst` fence *X* sequenced before
250
  *B*, then *B* observes either the last `memory_order_seq_cst`
251
  modification of *M* preceding *X* in the total order *S* or a later
@@ -273,21 +336,24 @@ later than *A* in the modification order of *M* if:
273
  before *B*, and *A* precedes *Y* in *S*, or
274
  - there are `memory_order_seq_cst` fences *X* and *Y* such that *A* is
275
  sequenced before *X*, *Y* is sequenced before *B*, and *X* precedes
276
  *Y* in *S*.
277
 
278
- `memory_order_seq_cst` ensures sequential consistency only for a program
279
- that is free of data races and uses exclusively `memory_order_seq_cst`
280
- operations. Any use of weaker ordering will invalidate this guarantee
281
- unless extreme care is used. In particular, `memory_order_seq_cst`
282
- fences ensure a total order only for the fences themselves. Fences
283
- cannot, in general, be used to restore sequential consistency for atomic
284
- operations with weaker ordering specifications.
 
285
 
286
  Implementations should ensure that no “out-of-thin-air” values are
287
  computed that circularly depend on their own computation.
288
 
 
 
289
  For example, with `x` and `y` initially zero,
290
 
291
  ``` cpp
292
  // Thread 1:
293
  r1 = y.load(memory_order_relaxed);
@@ -303,10 +369,14 @@ y.store(r2, memory_order_relaxed);
303
  should not produce `r1 == r2 == 42`, since the store of 42 to `y` is
304
  only possible if the store to `x` stores `42`, which circularly depends
305
  on the store to `y` storing `42`. Note that without this restriction,
306
  such an execution is possible.
307
 
 
 
 
 
308
  The recommendation similarly disallows `r1 == r2 == 42` in the following
309
  example, with `x` and `y` again initially zero:
310
 
311
  ``` cpp
312
  // Thread 1:
@@ -318,10 +388,12 @@ if (r1 == 42) y.store(42, memory_order_relaxed);
318
  // Thread 2:
319
  r2 = y.load(memory_order_relaxed);
320
  if (r2 == 42) x.store(42, memory_order_relaxed);
321
  ```
322
 
 
 
323
  Atomic read-modify-write operations shall always read the last value (in
324
  the modification order) written before the write associated with the
325
  read-modify-write operation.
326
 
327
  Implementations should make atomic stores visible to atomic loads within
@@ -363,22 +435,27 @@ types are always lock-free.
363
  The function `atomic_is_lock_free` ([[atomics.types.operations]])
364
  indicates whether the object is lock-free. In any given program
365
  execution, the result of the lock-free query shall be consistent for all
366
  pointers of the same type.
367
 
368
- Operations that are lock-free should also be address-free. That is,
369
- atomic operations on the same memory location via two different
 
 
 
370
  addresses will communicate atomically. The implementation should not
371
  depend on any per-process state. This restriction enables communication
372
  by memory that is mapped into a process more than once and by memory
373
- that is shared between two processes.
374
 
375
- ## Atomic types <a id="atomics.types.generic">[[atomics.types.generic]]</a>
376
 
377
  ``` cpp
378
  namespace std {
379
  template <class T> struct atomic {
 
 
380
  bool is_lock_free() const volatile noexcept;
381
  bool is_lock_free() const noexcept;
382
  void store(T, memory_order = memory_order_seq_cst) volatile noexcept;
383
  void store(T, memory_order = memory_order_seq_cst) noexcept;
384
  T load(memory_order = memory_order_seq_cst) const volatile noexcept;
@@ -402,30 +479,311 @@ namespace std {
402
  atomic& operator=(const atomic&) = delete;
403
  atomic& operator=(const atomic&) volatile = delete;
404
  T operator=(T) volatile noexcept;
405
  T operator=(T) noexcept;
406
  };
 
 
407
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
408
  template <> struct atomic<integral> {
 
 
 
409
  bool is_lock_free() const volatile noexcept;
410
  bool is_lock_free() const noexcept;
411
  void store(integral, memory_order = memory_order_seq_cst) volatile noexcept;
412
  void store(integral, memory_order = memory_order_seq_cst) noexcept;
413
  integral load(memory_order = memory_order_seq_cst) const volatile noexcept;
414
  integral load(memory_order = memory_order_seq_cst) const noexcept;
415
  operator integral() const volatile noexcept;
416
  operator integral() const noexcept;
417
  integral exchange(integral, memory_order = memory_order_seq_cst) volatile noexcept;
418
  integral exchange(integral, memory_order = memory_order_seq_cst) noexcept;
419
- bool compare_exchange_weak(integral&, integral, memory_order, memory_order) volatile noexcept;
420
- bool compare_exchange_weak(integral&, integral, memory_order, memory_order) noexcept;
421
- bool compare_exchange_strong(integral&, integral, memory_order, memory_order) volatile noexcept;
422
- bool compare_exchange_strong(integral&, integral, memory_order, memory_order) noexcept;
423
- bool compare_exchange_weak(integral&, integral, memory_order = memory_order_seq_cst) volatile noexcept;
424
- bool compare_exchange_weak(integral&, integral, memory_order = memory_order_seq_cst) noexcept;
425
- bool compare_exchange_strong(integral&, integral, memory_order = memory_order_seq_cst) volatile noexcept;
426
- bool compare_exchange_strong(integral&, integral, memory_order = memory_order_seq_cst) noexcept;
 
 
 
 
 
 
 
 
427
  integral fetch_add(integral, memory_order = memory_order_seq_cst) volatile noexcept;
428
  integral fetch_add(integral, memory_order = memory_order_seq_cst) noexcept;
429
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) volatile noexcept;
430
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) noexcept;
431
  integral fetch_and(integral, memory_order = memory_order_seq_cst) volatile noexcept;
@@ -460,12 +818,63 @@ namespace std {
460
  integral operator|=(integral) volatile noexcept;
461
  integral operator|=(integral) noexcept;
462
  integral operator^=(integral) volatile noexcept;
463
  integral operator^=(integral) noexcept;
464
  };
 
 
465
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
466
  template <class T> struct atomic<T*> {
 
 
 
467
  bool is_lock_free() const volatile noexcept;
468
  bool is_lock_free() const noexcept;
469
  void store(T*, memory_order = memory_order_seq_cst) volatile noexcept;
470
  void store(T*, memory_order = memory_order_seq_cst) noexcept;
471
  T* load(memory_order = memory_order_seq_cst) const volatile noexcept;
@@ -509,435 +918,134 @@ namespace std {
509
  T* operator-=(ptrdiff_t) noexcept;
510
  };
511
  }
512
  ```
513
 
514
- There is a generic class template `atomic<T>`. The type of the template
515
- argument `T` shall be trivially copyable ([[basic.types]]). Type
516
- arguments that are not also statically initializable may be difficult to
517
- use.
518
-
519
- The semantics of the operations on specializations of `atomic` are
520
- defined in  [[atomics.types.operations]].
521
-
522
- Specializations and instantiations of the `atomic` template shall have a
523
- deleted copy constructor, a deleted copy assignment operator, and a
524
- constexpr value constructor.
525
-
526
- There shall be explicit specializations of the `atomic` template for the
527
- integral types `char`, `signed char`, `unsigned char`, `short`,
528
- `unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
529
- `long long`, `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`,
530
- and any other types needed by the typedefs in the header `<cstdint>`.
531
- For each integral type *integral*, the specialization `atomic<integral>`
532
- provides additional atomic operations appropriate to integral types.
533
- There shall be a specialization `atomic<bool>` which provides the
534
- general atomic operations as specified in
535
- [[atomics.types.operations.general]].
536
-
537
- The atomic integral specializations and the specialization
538
- `atomic<bool>` shall have standard layout. They shall each have a
539
- trivial default constructor and a trivial destructor. They shall each
540
- support aggregate initialization syntax.
541
-
542
- There shall be pointer partial specializations of the `atomic` class
543
- template. These specializations shall have standard layout, trivial
544
- default constructors, and trivial destructors. They shall each support
545
- aggregate initialization syntax.
546
-
547
- There shall be named types corresponding to the integral specializations
548
- of `atomic`, as specified in Table  [[tab:atomics.integral]], and a
549
- named type `atomic_bool` corresponding to the specified `atomic<bool>`.
550
- Each named type is either a typedef to the corresponding specialization
551
- or a base class of the corresponding specialization. If it is a base
552
- class, it shall support the same member functions as the corresponding
553
- specialization.
554
-
555
- There shall be atomic typedefs corresponding to the typedefs in the
556
- header `<inttypes.h>` as specified in Table  [[tab:atomics.typedefs]].
557
-
558
- The representation of an atomic specialization need not have the same
559
- size as its corresponding argument type. Specializations should have the
560
- same size whenever possible, as this reduces the effort required to port
561
- existing code.
562
-
563
- ## Operations on atomic types <a id="atomics.types.operations">[[atomics.types.operations]]</a>
564
-
565
- ### General operations on atomic types <a id="atomics.types.operations.general">[[atomics.types.operations.general]]</a>
566
-
567
- The implementation shall provide the functions and function templates
568
- identified as “general operations on atomic types” in  [[atomics.syn]].
569
-
570
- In the declarations of these functions and function templates, the name
571
- *atomic-type* refers to either `atomic<T>` or to a named base class for
572
- `T` from Table  [[tab:atomics.integral]] or inferred from Table 
573
- [[tab:atomics.typedefs]].
574
-
575
- ### Templated operations on atomic types <a id="atomics.types.operations.templ">[[atomics.types.operations.templ]]</a>
576
-
577
- The implementation shall declare but not define the function templates
578
- identified as “templated operations on atomic types” in 
579
- [[atomics.syn]].
580
-
581
- ### Arithmetic operations on atomic types <a id="atomics.types.operations.arith">[[atomics.types.operations.arith]]</a>
582
-
583
- The implementation shall provide the functions and function template
584
- specializations identified as “arithmetic operations on atomic types”
585
- in  [[atomics.syn]].
586
-
587
- In the declarations of these functions and function template
588
- specializations, the name *integral* refers to an integral type and the
589
- name *atomic-integral* refers to either `atomic<integral>` or to a named
590
- base class for `integral` from Table  [[tab:atomics.integral]] or
591
- inferred from Table  [[tab:atomics.typedefs]].
592
-
593
- ### Operations on atomic pointer types <a id="atomics.types.operations.pointer">[[atomics.types.operations.pointer]]</a>
594
-
595
- The implementation shall provide the function template specializations
596
- identified as “partial specializations for pointers” in 
597
- [[atomics.syn]].
598
-
599
- ### Requirements for operations on atomic types <a id="atomics.types.operations.req">[[atomics.types.operations.req]]</a>
600
-
601
- There are only a few kinds of operations on atomic types, though there
602
- are many instances on those kinds. This section specifies each general
603
- kind. The specific instances are defined in [[atomics.types.generic]],
604
- [[atomics.types.operations.general]],
605
- [[atomics.types.operations.arith]], and
606
- [[atomics.types.operations.pointer]].
607
-
608
- In the following operation definitions:
609
-
610
- - an *A* refers to one of the atomic types.
611
- - a *C* refers to its corresponding non-atomic type.
612
- an *M* refers to the type of the other argument for arithmetic operations.
613
- For integral atomic types, *M* is *C*. For atomic address types, *M*
614
- is `std::ptrdiff_t`.
615
- - the non-member functions not ending in `_explicit` have the semantics
616
- of their corresponding `_explicit` functions with `memory_order`
617
- arguments of `memory_order_seq_cst`.
618
-
619
- Many operations are volatile-qualified. The “volatile as device
620
- register” semantics have not changed in the standard. This qualification
621
- means that volatility is preserved when applying these operations to
622
- volatile objects. It does not mean that operations on non-volatile
623
- objects become volatile. Thus, volatile qualified operations on
624
- non-volatile objects may be merged under some conditions.
625
 
626
  ``` cpp
627
- A::A() noexcept = default;
 
628
  ```
629
 
630
- *Effects:* leaves the atomic object in an uninitialized state. These
631
- semantics ensure compatibility with C.
 
 
632
 
633
  ``` cpp
634
- constexpr A::A(C desired) noexcept;
 
635
  ```
636
 
637
- *Effects:* Initializes the object with the value `desired`.
638
- Initialization is not an atomic operation ([[intro.multithread]]). It
639
- is possible to have an access to an atomic object `A` race with its
640
- construction, for example by communicating the address of the
641
- just-constructed object `A` to another thread via `memory_order_relaxed`
642
- operations on a suitable atomic pointer variable, and then immediately
643
- accessing `A` in the receiving thread. This results in undefined
644
- behavior.
645
 
646
  ``` cpp
647
- #define ATOMIC_VAR_INIT(value) see below
 
648
  ```
649
 
650
- The macro expands to a token sequence suitable for constant
651
- initialization of an atomic variable of static storage duration of a
652
- type that is initialization-compatible with *value*. This operation may
653
- need to initialize locks. Concurrent access to the variable being
654
- initialized, even via an atomic operation, constitutes a data race.
655
 
656
  ``` cpp
657
- atomic<int> v = ATOMIC_VAR_INIT(5);
 
658
  ```
659
 
 
 
660
  ``` cpp
661
- bool atomic_is_lock_free(const volatile A* object) noexcept;
662
- bool atomic_is_lock_free(const A* object) noexcept;
663
- bool A::is_lock_free() const volatile noexcept;
664
- bool A::is_lock_free() const noexcept;
665
  ```
666
 
667
- *Returns:* True if the object’s operations are lock-free, false
668
- otherwise.
 
 
 
 
 
 
 
 
 
 
669
 
670
  ``` cpp
671
- void atomic_init(volatile A* object, C desired) noexcept;
672
- void atomic_init(A* object, C desired) noexcept;
 
 
673
  ```
674
 
675
  *Effects:* Non-atomically initializes `*object` with value `desired`.
676
  This function shall only be applied to objects that have been default
677
- constructed, and then only once. These semantics ensure compatibility
678
- with C. Concurrent access from another thread, even via an atomic
679
- operation, constitutes a data race.
680
 
681
- ``` cpp
682
- void atomic_store(volatile A* object, C desired) noexcept;
683
- void atomic_store(A* object, C desired) noexcept;
684
- void atomic_store_explicit(volatile A* object, C desired, memory_order order) noexcept;
685
- void atomic_store_explicit(A* object, C desired, memory_order order) noexcept;
686
- void A::store(C desired, memory_order order = memory_order_seq_cst) volatile noexcept;
687
- void A::store(C desired, memory_order order = memory_order_seq_cst) noexcept;
688
- ```
689
 
690
- *Requires:* The `order` argument shall not be `memory_order_consume`,
691
- `memory_order_acquire`, nor `memory_order_acq_rel`.
692
 
693
- *Effects:* Atomically replaces the value pointed to by `object` or by
694
- `this` with the value of `desired`. Memory is affected according to the
695
- value of `order`.
696
-
697
- ``` cpp
698
- C A::operator=(C desired) volatile noexcept;
699
- C A::operator=(C desired) noexcept;
700
- ```
701
-
702
- *Effects:* `store(desired)`
703
-
704
- *Returns:* `desired`
705
-
706
- ``` cpp
707
- C atomic_load(const volatile A* object) noexcept;
708
- C atomic_load(const A* object) noexcept;
709
- C atomic_load_explicit(const volatile A* object, memory_order) noexcept;
710
- C atomic_load_explicit(const A* object, memory_order) noexcept;
711
- C A::load(memory_order order = memory_order_seq_cst) const volatile noexcept;
712
- C A::load(memory_order order = memory_order_seq_cst) const noexcept;
713
- ```
714
-
715
- *Requires:* The `order` argument shall not be `memory_order_release` nor
716
- `memory_order_acq_rel`.
717
-
718
- *Effects:* Memory is affected according to the value of `order`.
719
-
720
- *Returns:* Atomically returns the value pointed to by `object` or by
721
- `this`.
722
-
723
- ``` cpp
724
- A::operator C() const volatile noexcept;
725
- A::operator C() const noexcept;
726
- ```
727
-
728
- *Effects:* `load()`
729
-
730
- *Returns:* The result of `load()`.
731
-
732
- ``` cpp
733
- C atomic_exchange(volatile A* object, C desired) noexcept;
734
- C atomic_exchange(A* object, C desired) noexcept;
735
- C atomic_exchange_explicit(volatile A* object, C desired, memory_order) noexcept;
736
- C atomic_exchange_explicit(A* object, C desired, memory_order) noexcept;
737
- C A::exchange(C desired, memory_order order = memory_order_seq_cst) volatile noexcept;
738
- C A::exchange(C desired, memory_order order = memory_order_seq_cst) noexcept;
739
- ```
740
-
741
- *Effects:* Atomically replaces the value pointed to by `object` or by
742
- `this` with `desired`. Memory is affected according to the value of
743
- `order`. These operations are atomic read-modify-write
744
- operations ([[intro.multithread]]).
745
-
746
- *Returns:* Atomically returns the value pointed to by `object` or by
747
- `this` immediately before the effects.
748
-
749
- ``` cpp
750
- bool atomic_compare_exchange_weak(volatile A* object, C* expected, C desired) noexcept;
751
- bool atomic_compare_exchange_weak(A* object, C* expected, C desired) noexcept;
752
- bool atomic_compare_exchange_strong(volatile A* object, C* expected, C desired) noexcept;
753
- bool atomic_compare_exchange_strong(A* object, C* expected, C desired) noexcept;
754
- bool atomic_compare_exchange_weak_explicit(volatile A* object, C* expected, C desired,
755
- memory_order success, memory_order failure) noexcept;
756
- bool atomic_compare_exchange_weak_explicit(A* object, C* expected, C desired,
757
- memory_order success, memory_order failure) noexcept;
758
- bool atomic_compare_exchange_strong_explicit(volatile A* object, C* expected, C desired,
759
- memory_order success, memory_order failure) noexcept;
760
- bool atomic_compare_exchange_strong_explicit(A* object, C* expected, C desired,
761
- memory_order success, memory_order failure) noexcept;
762
- bool A::compare_exchange_weak(C& expected, C desired,
763
- memory_order success, memory_order failure) volatile noexcept;
764
- bool A::compare_exchange_weak(C& expected, C desired,
765
- memory_order success, memory_order failure) noexcept;
766
- bool A::compare_exchange_strong(C& expected, C desired,
767
- memory_order success, memory_order failure) volatile noexcept;
768
- bool A::compare_exchange_strong(C& expected, C desired,
769
- memory_order success, memory_order failure) noexcept;
770
- bool A::compare_exchange_weak(C& expected, C desired,
771
- memory_order order = memory_order_seq_cst) volatile noexcept;
772
- bool A::compare_exchange_weak(C& expected, C desired,
773
- memory_order order = memory_order_seq_cst) noexcept;
774
- bool A::compare_exchange_strong(C& expected, C desired,
775
- memory_order order = memory_order_seq_cst) volatile noexcept;
776
- bool A::compare_exchange_strong(C& expected, C desired,
777
- memory_order order = memory_order_seq_cst) noexcept;
778
- ```
779
-
780
- *Requires:* The `failure` argument shall not be `memory_order_release`
781
- nor `memory_order_acq_rel`. The `failure` argument shall be no stronger
782
- than the `success` argument.
783
-
784
- *Effects:* Atomically, compares the contents of the memory pointed to by
785
- `object` or by `this` for equality with that in `expected`, and if true,
786
- replaces the contents of the memory pointed to by `object` or by `this`
787
- with that in `desired`, and if false, updates the contents of the memory
788
- in `expected` with the contents of the memory pointed to by `object` or
789
- by `this`. Further, if the comparison is true, memory is affected
790
- according to the value of `success`, and if the comparison is false,
791
- memory is affected according to the value of `failure`. When only one
792
- `memory_order` argument is supplied, the value of `success` is `order`,
793
- and the value of `failure` is `order` except that a value of
794
- `memory_order_acq_rel` shall be replaced by the value
795
- `memory_order_acquire` and a value of `memory_order_release` shall be
796
- replaced by the value `memory_order_relaxed`. If the operation returns
797
- `true`, these operations are atomic read-modify-write
798
- operations ([[intro.multithread]]). Otherwise, these operations are
799
- atomic load operations.
800
-
801
- *Returns:* The result of the comparison.
802
-
803
- For example, the effect of `atomic_compare_exchange_strong` is
804
-
805
- ``` cpp
806
- if (memcmp(object, expected, sizeof(*object)) == 0)
807
- memcpy(object, &desired, sizeof(*object));
808
- else
809
- memcpy(expected, object, sizeof(*object));
810
- ```
811
-
812
- the expected use of the compare-and-exchange operations is as follows.
813
- The compare-and-exchange operations will update `expected` when another
814
- iteration of the loop is needed.
815
-
816
- ``` cpp
817
- expected = current.load();
818
- do {
819
- desired = function(expected);
820
- } while (!current.compare_exchange_weak(expected, desired));
821
- ```
822
-
823
- Implementations should ensure that weak compare-and-exchange operations
824
- do not consistently return `false` unless either the atomic object has
825
- value different from `expected` or there are concurrent modifications to
826
- the atomic object.
827
-
828
- A weak compare-and-exchange operation may fail spuriously. That is, even
829
- when the contents of memory referred to by `expected` and `object` are
830
- equal, it may return false and store back to `expected` the same memory
831
- contents that were originally there. This spurious failure enables
832
- implementation of compare-and-exchange on a broader class of machines,
833
- e.g., load-locked store-conditional machines. A consequence of spurious
834
- failure is that nearly all uses of weak compare-and-exchange will be in
835
- a loop.
836
-
837
- When a compare-and-exchange is in a loop, the weak version will yield
838
- better performance on some platforms. When a weak compare-and-exchange
839
- would require a loop and a strong one would not, the strong one is
840
- preferable.
841
-
842
- The `memcpy` and `memcmp` semantics of the compare-and-exchange
843
- operations may result in failed comparisons for values that compare
844
- equal with `operator==` if the underlying type has padding bits, trap
845
- bits, or alternate representations of the same value. Thus,
846
- `compare_exchange_strong` should be used with extreme care. On the other
847
- hand, `compare_exchange_weak` should converge rapidly.
848
-
849
- The following operations perform arithmetic computations. The key,
850
- operator, and computation correspondence is:
851
-
852
- **Table: Atomic arithmetic computations** <a id="tab:atomic.arithmetic.computations">[tab:atomic.arithmetic.computations]</a>
853
-
854
- | | | | | | |
855
- | ----- | --- | -------------------- | ----- | --- | -------------------- |
856
- | `add` | `+` | addition | `sub` | `-` | subtraction |
857
- | `or` | `|` | bitwise inclusive or | `xor` | `^` | bitwise exclusive or |
858
- | `and` | `&` | bitwise and | | | |
859
-
860
- ``` cpp
861
- C atomic_fetch_key(volatile A* object, M operand) noexcept;
862
- C atomic_fetch_key(A* object, M operand) noexcept;
863
- C atomic_fetch_key_explicit(volatile A* object, M operand, memory_order order) noexcept;
864
- C atomic_fetch_key_explicit(A* object, M operand, memory_order order) noexcept;
865
- C A::fetch_key(M operand, memory_order order = memory_order_seq_cst) volatile noexcept;
866
- C A::fetch_key(M operand, memory_order order = memory_order_seq_cst) noexcept;
867
- ```
868
-
869
- *Effects:* Atomically replaces the value pointed to by `object` or by
870
- `this` with the result of the *computation* applied to the value pointed
871
- to by `object` or by `this` and the given `operand`. Memory is affected
872
- according to the value of `order`. These operations are atomic
873
- read-modify-write operations ([[intro.multithread]]).
874
-
875
- *Returns:* Atomically, the value pointed to by `object` or by `this`
876
- immediately before the effects.
877
-
878
- For signed integer types, arithmetic is defined to use two’s complement
879
- representation. There are no undefined results. For address types, the
880
- result may be an undefined address, but the operations otherwise have no
881
- undefined behavior.
882
-
883
- ``` cpp
884
- C A::operator op=(M operand) volatile noexcept;
885
- C A::operator op=(M operand) noexcept;
886
- ```
887
-
888
- *Effects:* `fetch_`*`key`*`(operand)`
889
-
890
- *Returns:* `fetch_`*`key`*`(operand) op operand`
891
-
892
- ``` cpp
893
- C A::operator++(int) volatile noexcept;
894
- C A::operator++(int) noexcept;
895
- ```
896
-
897
- *Returns:* `fetch_add(1)`
898
-
899
- ``` cpp
900
- C A::operator--(int) volatile noexcept;
901
- C A::operator--(int) noexcept;
902
- ```
903
-
904
- *Returns:* `fetch_sub(1)`
905
-
906
- ``` cpp
907
- C A::operator++() volatile noexcept;
908
- C A::operator++() noexcept;
909
- ```
910
-
911
- *Effects:* `fetch_add(1)`
912
-
913
- *Returns:* `fetch_add(1) + 1`
914
-
915
- ``` cpp
916
- C A::operator--() volatile noexcept;
917
- C A::operator--() noexcept;
918
- ```
919
-
920
- *Effects:* `fetch_sub(1)`
921
-
922
- *Returns:* `fetch_sub(1) - 1`
923
 
924
  ## Flag type and operations <a id="atomics.flag">[[atomics.flag]]</a>
925
 
926
  ``` cpp
927
  namespace std {
928
- typedef struct atomic_flag {
929
  bool test_and_set(memory_order = memory_order_seq_cst) volatile noexcept;
930
  bool test_and_set(memory_order = memory_order_seq_cst) noexcept;
931
  void clear(memory_order = memory_order_seq_cst) volatile noexcept;
932
  void clear(memory_order = memory_order_seq_cst) noexcept;
933
 
934
  atomic_flag() noexcept = default;
935
  atomic_flag(const atomic_flag&) = delete;
936
  atomic_flag& operator=(const atomic_flag&) = delete;
937
  atomic_flag& operator=(const atomic_flag&) volatile = delete;
938
- } atomic_flag;
939
 
940
  bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
941
  bool atomic_flag_test_and_set(atomic_flag*) noexcept;
942
  bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
943
  bool atomic_flag_test_and_set_explicit(atomic_flag*, memory_order) noexcept;
@@ -951,20 +1059,17 @@ namespace std {
951
  ```
952
 
953
  The `atomic_flag` type provides the classic test-and-set functionality.
954
  It has two states, set and clear.
955
 
956
- Operations on an object of type `atomic_flag` shall be lock-free. Hence
957
- the operations should also be address-free. No other type requires
958
- lock-free operations, so the `atomic_flag` type is the minimum
959
- hardware-implemented type needed to conform to this International
960
- standard. The remaining types can be emulated with `atomic_flag`, though
961
- with less than ideal properties.
962
 
963
- The `atomic_flag` type shall have standard layout. It shall have a
964
- trivial default constructor, a deleted copy constructor, a deleted copy
965
- assignment operator, and a trivial destructor.
 
 
966
 
967
  The macro `ATOMIC_FLAG_INIT` shall be defined in such a way that it can
968
  be used to initialize an object of type `atomic_flag` to the clear
969
  state. The macro can be used in the form:
970
 
@@ -986,11 +1091,11 @@ bool atomic_flag_test_and_set_explicit(atomic_flag* object, memory_order order)
986
  bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) volatile noexcept;
987
  bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) noexcept;
988
  ```
989
 
990
  *Effects:* Atomically sets the value pointed to by `object` or by `this`
991
- to true. Memory is affected according to the value of `order`. These
992
  operations are atomic read-modify-write
993
  operations ([[intro.multithread]]).
994
 
995
  *Returns:* Atomically, the value of the object immediately before the
996
  effects.
@@ -1006,11 +1111,11 @@ void atomic_flag::clear(memory_order order = memory_order_seq_cst) noexcept;
1006
 
1007
  *Requires:* The `order` argument shall not be `memory_order_consume`,
1008
  `memory_order_acquire`, nor `memory_order_acq_rel`.
1009
 
1010
  *Effects:* Atomically sets the value pointed to by `object` or by `this`
1011
- to false. Memory is affected according to the value of `order`.
1012
 
1013
  ## Fences <a id="atomics.fences">[[atomics.fences]]</a>
1014
 
1015
  This section introduces synchronization primitives called *fences*.
1016
  Fences can have acquire semantics, release semantics, or both. A fence
@@ -1039,11 +1144,11 @@ release sequence headed by *A*.
1039
 
1040
  ``` cpp
1041
  extern "C" void atomic_thread_fence(memory_order order) noexcept;
1042
  ```
1043
 
1044
- *Effects:* depending on the value of `order`, this operation:
1045
 
1046
  - has no effects, if `order == memory_order_relaxed`;
1047
  - is an acquire fence, if
1048
  `order == memory_order_acquire || order == memory_order_consume`;
1049
  - is a release fence, if `order == memory_order_release`;
@@ -1058,32 +1163,30 @@ extern "C" void atomic_signal_fence(memory_order order) noexcept;
1058
 
1059
  *Effects:* Equivalent to `atomic_thread_fence(order)`, except that the
1060
  resulting ordering constraints are established only between a thread and
1061
  a signal handler executed in the same thread.
1062
 
1063
- *Note:* `atomic_signal_fence` can be used to specify the order in which
1064
- actions performed by the thread become visible to the signal handler.
1065
-
1066
- *Note:* compiler optimizations and reorderings of loads and stores are
1067
  inhibited in the same way as with `atomic_thread_fence`, but the
1068
  hardware fence instructions that `atomic_thread_fence` would have
1069
- inserted are not emitted.
1070
 
1071
  <!-- Link reference definitions -->
1072
  [atomics]: #atomics
 
1073
  [atomics.fences]: #atomics.fences
1074
  [atomics.flag]: #atomics.flag
1075
  [atomics.general]: #atomics.general
1076
  [atomics.lockfree]: #atomics.lockfree
 
1077
  [atomics.order]: #atomics.order
1078
  [atomics.syn]: #atomics.syn
1079
  [atomics.types.generic]: #atomics.types.generic
 
 
1080
  [atomics.types.operations]: #atomics.types.operations
1081
- [atomics.types.operations.arith]: #atomics.types.operations.arith
1082
- [atomics.types.operations.general]: #atomics.types.operations.general
1083
- [atomics.types.operations.pointer]: #atomics.types.operations.pointer
1084
- [atomics.types.operations.req]: #atomics.types.operations.req
1085
- [atomics.types.operations.templ]: #atomics.types.operations.templ
1086
  [basic.types]: basic.md#basic.types
1087
  [intro.multithread]: intro.md#intro.multithread
1088
- [tab:atomics.integral]: #tab:atomics.integral
1089
- [tab:atomics.typedefs]: #tab:atomics.typedefs
 
39
  #define ATOMIC_INT_LOCK_FREE unspecified
40
  #define ATOMIC_LONG_LOCK_FREE unspecified
41
  #define ATOMIC_LLONG_LOCK_FREE unspecified
42
  #define ATOMIC_POINTER_LOCK_FREE unspecified
43
 
44
+ // [atomics.types.generic], atomic
45
  template<class T> struct atomic;
46
+ // [atomics.types.pointer], partial specialization for pointers
47
  template<class T> struct atomic<T*>;
48
 
49
+ // [atomics.nonmembers], non-member functions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  template<class T>
51
+ bool atomic_is_lock_free(const volatile atomic<T>*) noexcept;
52
  template<class T>
53
+ bool atomic_is_lock_free(const atomic<T>*) noexcept;
54
  template<class T>
55
+ void atomic_init(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
56
  template<class T>
57
+ void atomic_init(atomic<T>*, typename atomic<T>::value_type) noexcept;
58
  template<class T>
59
+ void atomic_store(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
60
  template<class T>
61
+ void atomic_store(atomic<T>*, typename atomic<T>::value_type) noexcept;
62
  template<class T>
63
+ void atomic_store_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
64
+ memory_order) noexcept;
65
  template<class T>
66
+ void atomic_store_explicit(atomic<T>*, typename atomic<T>::value_type,
67
+ memory_order) noexcept;
68
  template<class T>
69
+ T atomic_load(const volatile atomic<T>*) noexcept;
70
  template<class T>
71
+ T atomic_load(const atomic<T>*) noexcept;
72
  template<class T>
73
+ T atomic_load_explicit(const volatile atomic<T>*, memory_order) noexcept;
74
  template<class T>
75
+ T atomic_load_explicit(const atomic<T>*, memory_order) noexcept;
76
  template<class T>
77
+ T atomic_exchange(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
78
  template<class T>
79
+ T atomic_exchange(atomic<T>*, typename atomic<T>::value_type) noexcept;
80
  template<class T>
81
+ T atomic_exchange_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
82
+ memory_order) noexcept;
83
  template<class T>
84
+ T atomic_exchange_explicit(atomic<T>*, typename atomic<T>::value_type,
85
+ memory_order) noexcept;
86
  template<class T>
87
+ bool atomic_compare_exchange_weak(volatile atomic<T>*,
88
+ typename atomic<T>::value_type*,
89
+ typename atomic<T>::value_type) noexcept;
90
  template<class T>
91
+ bool atomic_compare_exchange_weak(atomic<T>*,
92
+ typename atomic<T>::value_type*,
93
+ typename atomic<T>::value_type) noexcept;
94
  template<class T>
95
+ bool atomic_compare_exchange_strong(volatile atomic<T>*,
96
+ typename atomic<T>::value_type*,
97
+ typename atomic<T>::value_type) noexcept;
98
  template<class T>
99
+ bool atomic_compare_exchange_strong(atomic<T>*,
100
+ typename atomic<T>::value_type*,
101
+ typename atomic<T>::value_type) noexcept;
102
+ template<class T>
103
+ bool atomic_compare_exchange_weak_explicit(volatile atomic<T>*,
104
+ typename atomic<T>::value_type*,
105
+ typename atomic<T>::value_type,
106
+ memory_order, memory_order) noexcept;
107
+ template<class T>
108
+ bool atomic_compare_exchange_weak_explicit(atomic<T>*,
109
+ typename atomic<T>::value_type*,
110
+ typename atomic<T>::value_type,
111
+ memory_order, memory_order) noexcept;
112
+ template<class T>
113
+ bool atomic_compare_exchange_strong_explicit(volatile atomic<T>*,
114
+ typename atomic<T>::value_type*,
115
+ typename atomic<T>::value_type,
116
+ memory_order, memory_order) noexcept;
117
+ template<class T>
118
+ bool atomic_compare_exchange_strong_explicit(atomic<T>*,
119
+ typename atomic<T>::value_type*,
120
+ typename atomic<T>::value_type,
121
+ memory_order, memory_order) noexcept;
 
 
 
 
 
 
 
 
122
 
123
  template <class T>
124
+ T atomic_fetch_add(volatile atomic<T>*, typename atomic<T>::difference_type) noexcept;
125
+ template <class T>
126
+ T atomic_fetch_add(atomic<T>*, typename atomic<T>::difference_type) noexcept;
127
+ template <class T>
128
+ T atomic_fetch_add_explicit(volatile atomic<T>*, typename atomic<T>::difference_type,
129
+ memory_order) noexcept;
130
+ template <class T>
131
+ T atomic_fetch_add_explicit(atomic<T>*, typename atomic<T>::difference_type,
132
+ memory_order) noexcept;
133
+ template <class T>
134
+ T atomic_fetch_sub(volatile atomic<T>*, typename atomic<T>::difference_type) noexcept;
135
+ template <class T>
136
+ T atomic_fetch_sub(atomic<T>*, typename atomic<T>::difference_type) noexcept;
137
+ template <class T>
138
+ T atomic_fetch_sub_explicit(volatile atomic<T>*, typename atomic<T>::difference_type,
139
+ memory_order) noexcept;
140
+ template <class T>
141
+ T atomic_fetch_sub_explicit(atomic<T>*, typename atomic<T>::difference_type,
142
+ memory_order) noexcept;
143
+ template <class T>
144
+ T atomic_fetch_and(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
145
+ template <class T>
146
+ T atomic_fetch_and(atomic<T>*, typename atomic<T>::value_type) noexcept;
147
+ template <class T>
148
+ T atomic_fetch_and_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
149
+ memory_order) noexcept;
150
+ template <class T>
151
+ T atomic_fetch_and_explicit(atomic<T>*, typename atomic<T>::value_type,
152
+ memory_order) noexcept;
153
+ template <class T>
154
+ T atomic_fetch_or(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
155
  template <class T>
156
+ T atomic_fetch_or(atomic<T>*, typename atomic<T>::value_type) noexcept;
157
  template <class T>
158
+ T atomic_fetch_or_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
159
+ memory_order) noexcept;
160
  template <class T>
161
+ T atomic_fetch_or_explicit(atomic<T>*, typename atomic<T>::value_type,
162
+ memory_order) noexcept;
163
  template <class T>
164
+ T atomic_fetch_xor(volatile atomic<T>*, typename atomic<T>::value_type) noexcept;
165
  template <class T>
166
+ T atomic_fetch_xor(atomic<T>*, typename atomic<T>::value_type) noexcept;
167
  template <class T>
168
+ T atomic_fetch_xor_explicit(volatile atomic<T>*, typename atomic<T>::value_type,
169
+ memory_order) noexcept;
170
  template <class T>
171
+ T atomic_fetch_xor_explicit(atomic<T>*, typename atomic<T>::value_type,
172
+ memory_order) noexcept;
173
 
174
+ // [atomics.types.operations], initialization
175
  #define ATOMIC_VAR_INIT(value) see below
176
 
177
+ // [atomics.alias], type aliases
178
+ using atomic_bool = atomic<bool>;
179
+ using atomic_char = atomic<char>;
180
+ using atomic_schar = atomic<signed char>;
181
+ using atomic_uchar = atomic<unsigned char>;
182
+ using atomic_short = atomic<short>;
183
+ using atomic_ushort = atomic<unsigned short>;
184
+ using atomic_int = atomic<int>;
185
+ using atomic_uint = atomic<unsigned int>;
186
+ using atomic_long = atomic<long>;
187
+ using atomic_ulong = atomic<unsigned long>;
188
+ using atomic_llong = atomic<long long>;
189
+ using atomic_ullong = atomic<unsigned long long>;
190
+ using atomic_char16_t = atomic<char16_t>;
191
+ using atomic_char32_t = atomic<char32_t>;
192
+ using atomic_wchar_t = atomic<wchar_t>;
193
+
194
+ using atomic_int8_t = atomic<int8_t>;
195
+ using atomic_uint8_t = atomic<uint8_t>;
196
+ using atomic_int16_t = atomic<int16_t>;
197
+ using atomic_uint16_t = atomic<uint16_t>;
198
+ using atomic_int32_t = atomic<int32_t>;
199
+ using atomic_uint32_t = atomic<uint32_t>;
200
+ using atomic_int64_t = atomic<int64_t>;
201
+ using atomic_uint64_t = atomic<uint64_t>;
202
+
203
+ using atomic_int_least8_t = atomic<int_least8_t>;
204
+ using atomic_uint_least8_t = atomic<uint_least8_t>;
205
+ using atomic_int_least16_t = atomic<int_least16_t>;
206
+ using atomic_uint_least16_t = atomic<uint_least16_t>;
207
+ using atomic_int_least32_t = atomic<int_least32_t>;
208
+ using atomic_uint_least32_t = atomic<uint_least32_t>;
209
+ using atomic_int_least64_t = atomic<int_least64_t>;
210
+ using atomic_uint_least64_t = atomic<uint_least64_t>;
211
+
212
+ using atomic_int_fast8_t = atomic<int_fast8_t>;
213
+ using atomic_uint_fast8_t = atomic<uint_fast8_t>;
214
+ using atomic_int_fast16_t = atomic<int_fast16_t>;
215
+ using atomic_uint_fast16_t = atomic<uint_fast16_t>;
216
+ using atomic_int_fast32_t = atomic<int_fast32_t>;
217
+ using atomic_uint_fast32_t = atomic<uint_fast32_t>;
218
+ using atomic_int_fast64_t = atomic<int_fast64_t>;
219
+ using atomic_uint_fast64_t = atomic<uint_fast64_t>;
220
+
221
+ using atomic_intptr_t = atomic<intptr_t>;
222
+ using atomic_uintptr_t = atomic<uintptr_t>;
223
+ using atomic_size_t = atomic<size_t>;
224
+ using atomic_ptrdiff_t = atomic<ptrdiff_t>;
225
+ using atomic_intmax_t = atomic<intmax_t>;
226
+ using atomic_uintmax_t = atomic<uintmax_t>;
227
+
228
  // [atomics.flag], flag type and operations
229
  struct atomic_flag;
230
  bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
231
  bool atomic_flag_test_and_set(atomic_flag*) noexcept;
232
  bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
 
241
  extern "C" void atomic_thread_fence(memory_order) noexcept;
242
  extern "C" void atomic_signal_fence(memory_order) noexcept;
243
  }
244
  ```
245
 
246
+ ## Type aliases <a id="atomics.alias">[[atomics.alias]]</a>
247
+
248
+ The type aliases `atomic_intN_t`, `atomic_uintN_t`, `atomic_intptr_t`,
249
+ and `atomic_uintptr_t` are defined if and only if `intN_t`, `uintN_t`,
250
+ `intptr_t`, and `uintptr_t` are defined, respectively.
251
+
252
  ## Order and consistency <a id="atomics.order">[[atomics.order]]</a>
253
 
254
  ``` cpp
255
  namespace std {
256
+ enum memory_order {
257
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
258
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
259
+ };
260
  }
261
  ```
262
 
263
  The enumeration `memory_order` specifies the detailed regular
264
  (non-atomic) memory synchronization order as defined in
 
268
  - `memory_order_relaxed`: no operation orders memory.
269
  - `memory_order_release`, `memory_order_acq_rel`, and
270
  `memory_order_seq_cst`: a store operation performs a release operation
271
  on the affected memory location.
272
  - `memory_order_consume`: a load operation performs a consume operation
273
+ on the affected memory location. \[*Note 1*: Prefer
274
+ `memory_order_acquire`, which provides stronger guarantees than
275
+ `memory_order_consume`. Implementations have found it infeasible to
276
+ provide performance better than that of `memory_order_acquire`.
277
+ Specification revisions are under consideration. — *end note*]
278
  - `memory_order_acquire`, `memory_order_acq_rel`, and
279
  `memory_order_seq_cst`: a load operation performs an acquire operation
280
  on the affected memory location.
281
 
282
+ [*Note 2*: Atomic operations specifying `memory_order_relaxed` are
283
+ relaxed with respect to memory ordering. Implementations must still
284
+ guarantee that any given atomic access to a particular atomic object be
285
+ indivisible with respect to all other atomic accesses to that
286
+ object. — *end note*]
287
 
288
  An atomic operation *A* that performs a release operation on an atomic
289
  object *M* synchronizes with an atomic operation *B* that performs an
290
  acquire operation on *M* and takes its value from any side effect in the
291
  release sequence headed by *A*.
 
301
  - if *A* exists, the result of some modification of *M* that is not
302
  `memory_order_seq_cst` and that does not happen before *A*, or
303
  - if *A* does not exist, the result of some modification of *M* that is
304
  not `memory_order_seq_cst`.
305
 
306
+ [*Note 3*: Although it is not explicitly required that *S* include
307
+ locks, it can always be extended to an order that does include lock and
308
+ unlock operations, since the ordering between those is already included
309
+ in the “happens before” ordering. — *end note*]
310
 
311
  For an atomic operation *B* that reads the value of an atomic object
312
  *M*, if there is a `memory_order_seq_cst` fence *X* sequenced before
313
  *B*, then *B* observes either the last `memory_order_seq_cst`
314
  modification of *M* preceding *X* in the total order *S* or a later
 
336
  before *B*, and *A* precedes *Y* in *S*, or
337
  - there are `memory_order_seq_cst` fences *X* and *Y* such that *A* is
338
  sequenced before *X*, *Y* is sequenced before *B*, and *X* precedes
339
  *Y* in *S*.
340
 
341
+ [*Note 4*: `memory_order_seq_cst` ensures sequential consistency only
342
+ for a program that is free of data races and uses exclusively
343
+ `memory_order_seq_cst` operations. Any use of weaker ordering will
344
+ invalidate this guarantee unless extreme care is used. In particular,
345
+ `memory_order_seq_cst` fences ensure a total order only for the fences
346
+ themselves. Fences cannot, in general, be used to restore sequential
347
+ consistency for atomic operations with weaker ordering
348
+ specifications. — *end note*]
349
 
350
  Implementations should ensure that no “out-of-thin-air” values are
351
  computed that circularly depend on their own computation.
352
 
353
+ [*Note 5*:
354
+
355
  For example, with `x` and `y` initially zero,
356
 
357
  ``` cpp
358
  // Thread 1:
359
  r1 = y.load(memory_order_relaxed);
 
369
  should not produce `r1 == r2 == 42`, since the store of 42 to `y` is
370
  only possible if the store to `x` stores `42`, which circularly depends
371
  on the store to `y` storing `42`. Note that without this restriction,
372
  such an execution is possible.
373
 
374
+ — *end note*]
375
+
376
+ [*Note 6*:
377
+
378
  The recommendation similarly disallows `r1 == r2 == 42` in the following
379
  example, with `x` and `y` again initially zero:
380
 
381
  ``` cpp
382
  // Thread 1:
 
388
  // Thread 2:
389
  r2 = y.load(memory_order_relaxed);
390
  if (r2 == 42) x.store(42, memory_order_relaxed);
391
  ```
392
 
393
+ — *end note*]
394
+
395
  Atomic read-modify-write operations shall always read the last value (in
396
  the modification order) written before the write associated with the
397
  read-modify-write operation.
398
 
399
  Implementations should make atomic stores visible to atomic loads within
 
435
  The function `atomic_is_lock_free` ([[atomics.types.operations]])
436
  indicates whether the object is lock-free. In any given program
437
  execution, the result of the lock-free query shall be consistent for all
438
  pointers of the same type.
439
 
440
+ Atomic operations that are not lock-free are considered to potentially
441
+ block ([[intro.progress]]).
442
+
443
+ [*Note 1*: Operations that are lock-free should also be address-free.
444
+ That is, atomic operations on the same memory location via two different
445
  addresses will communicate atomically. The implementation should not
446
  depend on any per-process state. This restriction enables communication
447
  by memory that is mapped into a process more than once and by memory
448
+ that is shared between two processes. — *end note*]
449
 
450
+ ## Class template `atomic` <a id="atomics.types.generic">[[atomics.types.generic]]</a>
451
 
452
  ``` cpp
453
  namespace std {
454
  template <class T> struct atomic {
455
+ using value_type = T;
456
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
457
  bool is_lock_free() const volatile noexcept;
458
  bool is_lock_free() const noexcept;
459
  void store(T, memory_order = memory_order_seq_cst) volatile noexcept;
460
  void store(T, memory_order = memory_order_seq_cst) noexcept;
461
  T load(memory_order = memory_order_seq_cst) const volatile noexcept;
 
479
  atomic& operator=(const atomic&) = delete;
480
  atomic& operator=(const atomic&) volatile = delete;
481
  T operator=(T) volatile noexcept;
482
  T operator=(T) noexcept;
483
  };
484
+ }
485
+ ```
486
 
487
+ The template argument for `T` shall be trivially copyable (
488
+ [[basic.types]]).
489
+
490
+ [*Note 1*: Type arguments that are not also statically initializable
491
+ may be difficult to use. — *end note*]
492
+
493
+ The specialization `atomic<bool>` is a standard-layout struct.
494
+
495
+ [*Note 2*: The representation of an atomic specialization need not have
496
+ the same size as its corresponding argument type. Specializations should
497
+ have the same size whenever possible, as this reduces the effort
498
+ required to port existing code. — *end note*]
499
+
500
+ ### Operations on atomic types <a id="atomics.types.operations">[[atomics.types.operations]]</a>
501
+
502
+ [*Note 1*: Many operations are volatile-qualified. The “volatile as
503
+ device register” semantics have not changed in the standard. This
504
+ qualification means that volatility is preserved when applying these
505
+ operations to volatile objects. It does not mean that operations on
506
+ non-volatile objects become volatile. — *end note*]
507
+
508
+ ``` cpp
509
+ atomic() noexcept = default;
510
+ ```
511
+
512
+ *Effects:* Leaves the atomic object in an uninitialized state.
513
+
514
+ [*Note 1*: These semantics ensure compatibility with C. — *end note*]
515
+
516
+ ``` cpp
517
+ constexpr atomic(T desired) noexcept;
518
+ ```
519
+
520
+ *Effects:* Initializes the object with the value `desired`.
521
+ Initialization is not an atomic operation ([[intro.multithread]]).
522
+
523
+ [*Note 2*: It is possible to have an access to an atomic object `A`
524
+ race with its construction, for example by communicating the address of
525
+ the just-constructed object `A` to another thread via
526
+ `memory_order_relaxed` operations on a suitable atomic pointer variable,
527
+ and then immediately accessing `A` in the receiving thread. This results
528
+ in undefined behavior. — *end note*]
529
+
530
+ ``` cpp
531
+ #define ATOMIC_VAR_INIT(value) see below
532
+ ```
533
+
534
+ The macro expands to a token sequence suitable for constant
535
+ initialization of an atomic variable of static storage duration of a
536
+ type that is initialization-compatible with `value`.
537
+
538
+ [*Note 3*: This operation may need to initialize locks. — *end note*]
539
+
540
+ Concurrent access to the variable being initialized, even via an atomic
541
+ operation, constitutes a data race.
542
+
543
+ [*Example 1*:
544
+
545
+ ``` cpp
546
+ atomic<int> v = ATOMIC_VAR_INIT(5);
547
+ ```
548
+
549
+ — *end example*]
550
+
551
+ ``` cpp
552
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
553
+ ```
554
+
555
+ The `static` data member `is_always_lock_free` is `true` if the atomic
556
+ type’s operations are always lock-free, and `false` otherwise.
557
+
558
+ [*Note 4*: The value of `is_always_lock_free` is consistent with the
559
+ value of the corresponding `ATOMIC_..._LOCK_FREE` macro, if
560
+ defined. — *end note*]
561
+
562
+ ``` cpp
563
+ bool is_lock_free() const volatile noexcept;
564
+ bool is_lock_free() const noexcept;
565
+ ```
566
+
567
+ *Returns:* `true` if the object’s operations are lock-free, `false`
568
+ otherwise.
569
+
570
+ [*Note 5*: The return value of the `is_lock_free` member function is
571
+ consistent with the value of `is_always_lock_free` for the same
572
+ type. — *end note*]
573
+
574
+ ``` cpp
575
+ void store(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
576
+ void store(T desired, memory_order order = memory_order_seq_cst) noexcept;
577
+ ```
578
+
579
+ *Requires:* The `order` argument shall not be `memory_order_consume`,
580
+ `memory_order_acquire`, nor `memory_order_acq_rel`.
581
+
582
+ *Effects:* Atomically replaces the value pointed to by `this` with the
583
+ value of `desired`. Memory is affected according to the value of
584
+ `order`.
585
+
586
+ ``` cpp
587
+ T operator=(T desired) volatile noexcept;
588
+ T operator=(T desired) noexcept;
589
+ ```
590
+
591
+ *Effects:* Equivalent to: `store(desired)`.
592
+
593
+ *Returns:* `desired`.
594
+
595
+ ``` cpp
596
+ T load(memory_order order = memory_order_seq_cst) const volatile noexcept;
597
+ T load(memory_order order = memory_order_seq_cst) const noexcept;
598
+ ```
599
+
600
+ *Requires:* The `order` argument shall not be `memory_order_release` nor
601
+ `memory_order_acq_rel`.
602
+
603
+ *Effects:* Memory is affected according to the value of `order`.
604
+
605
+ *Returns:* Atomically returns the value pointed to by `this`.
606
+
607
+ ``` cpp
608
+ operator T() const volatile noexcept;
609
+ operator T() const noexcept;
610
+ ```
611
+
612
+ *Effects:* Equivalent to: `return load();`
613
+
614
+ ``` cpp
615
+ T exchange(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
616
+ T exchange(T desired, memory_order order = memory_order_seq_cst) noexcept;
617
+ ```
618
+
619
+ *Effects:* Atomically replaces the value pointed to by `this` with
620
+ `desired`. Memory is affected according to the value of `order`. These
621
+ operations are atomic read-modify-write
622
+ operations ([[intro.multithread]]).
623
+
624
+ *Returns:* Atomically returns the value pointed to by `this` immediately
625
+ before the effects.
626
+
627
+ ``` cpp
628
+ bool compare_exchange_weak(T& expected, T desired,
629
+ memory_order success, memory_order failure) volatile noexcept;
630
+ bool compare_exchange_weak(T& expected, T desired,
631
+ memory_order success, memory_order failure) noexcept;
632
+ bool compare_exchange_strong(T& expected, T desired,
633
+ memory_order success, memory_order failure) volatile noexcept;
634
+ bool compare_exchange_strong(T& expected, T desired,
635
+ memory_order success, memory_order failure) noexcept;
636
+ bool compare_exchange_weak(T& expected, T desired,
637
+ memory_order order = memory_order_seq_cst) volatile noexcept;
638
+ bool compare_exchange_weak(T& expected, T desired,
639
+ memory_order order = memory_order_seq_cst) noexcept;
640
+ bool compare_exchange_strong(T& expected, T desired,
641
+ memory_order order = memory_order_seq_cst) volatile noexcept;
642
+ bool compare_exchange_strong(T& expected, T desired,
643
+ memory_order order = memory_order_seq_cst) noexcept;
644
+ ```
645
+
646
+ *Requires:* The `failure` argument shall not be `memory_order_release`
647
+ nor `memory_order_acq_rel`.
648
+
649
+ *Effects:* Retrieves the value in `expected`. It then atomically
650
+ compares the contents of the memory pointed to by `this` for equality
651
+ with that previously retrieved from `expected`, and if true, replaces
652
+ the contents of the memory pointed to by `this` with that in `desired`.
653
+ If and only if the comparison is true, memory is affected according to
654
+ the value of `success`, and if the comparison is false, memory is
655
+ affected according to the value of `failure`. When only one
656
+ `memory_order` argument is supplied, the value of `success` is `order`,
657
+ and the value of `failure` is `order` except that a value of
658
+ `memory_order_acq_rel` shall be replaced by the value
659
+ `memory_order_acquire` and a value of `memory_order_release` shall be
660
+ replaced by the value `memory_order_relaxed`. If and only if the
661
+ comparison is false then, after the atomic operation, the contents of
662
+ the memory in `expected` are replaced by the value read from the memory
663
+ pointed to by `this` during the atomic comparison. If the operation
664
+ returns `true`, these operations are atomic read-modify-write
665
+ operations ([[intro.multithread]]) on the memory pointed to by `this`.
666
+ Otherwise, these operations are atomic load operations on that memory.
667
+
668
+ *Returns:* The result of the comparison.
669
+
670
+ [*Note 6*:
671
+
672
+ For example, the effect of `compare_exchange_strong` is
673
+
674
+ ``` cpp
675
+ if (memcmp(this, &expected, sizeof(*this)) == 0)
676
+ memcpy(this, &desired, sizeof(*this));
677
+ else
678
+ memcpy(&expected, this, sizeof(*this));
679
+ ```
680
+
681
+ — *end note*]
682
+
683
+ [*Example 2*:
684
+
685
+ The expected use of the compare-and-exchange operations is as follows.
686
+ The compare-and-exchange operations will update `expected` when another
687
+ iteration of the loop is needed.
688
+
689
+ ``` cpp
690
+ expected = current.load();
691
+ do {
692
+ desired = function(expected);
693
+ } while (!current.compare_exchange_weak(expected, desired));
694
+ ```
695
+
696
+ — *end example*]
697
+
698
+ [*Example 3*:
699
+
700
+ Because the expected value is updated only on failure, code releasing
701
+ the memory containing the `expected` value on success will work. E.g.
702
+ list head insertion will act atomically and would not introduce a data
703
+ race in the following code:
704
+
705
+ ``` cpp
706
+ do {
707
+ p->next = head; // make new list node point to the current head
708
+ } while (!head.compare_exchange_weak(p->next, p)); // try to insert
709
+ ```
710
+
711
+ — *end example*]
712
+
713
+ Implementations should ensure that weak compare-and-exchange operations
714
+ do not consistently return `false` unless either the atomic object has
715
+ value different from `expected` or there are concurrent modifications to
716
+ the atomic object.
717
+
718
+ *Remarks:* A weak compare-and-exchange operation may fail spuriously.
719
+ That is, even when the contents of memory referred to by `expected` and
720
+ `this` are equal, it may return `false` and store back to `expected` the
721
+ same memory contents that were originally there.
722
+
723
+ [*Note 7*: This spurious failure enables implementation of
724
+ compare-and-exchange on a broader class of machines, e.g., load-locked
725
+ store-conditional machines. A consequence of spurious failure is that
726
+ nearly all uses of weak compare-and-exchange will be in a loop. When a
727
+ compare-and-exchange is in a loop, the weak version will yield better
728
+ performance on some platforms. When a weak compare-and-exchange would
729
+ require a loop and a strong one would not, the strong one is
730
+ preferable. — *end note*]
731
+
732
+ [*Note 8*: The `memcpy` and `memcmp` semantics of the
733
+ compare-and-exchange operations may result in failed comparisons for
734
+ values that compare equal with `operator==` if the underlying type has
735
+ padding bits, trap bits, or alternate representations of the same value.
736
+ Thus, `compare_exchange_strong` should be used with extreme care. On the
737
+ other hand, `compare_exchange_weak` should converge
738
+ rapidly. — *end note*]
739
+
740
+ ### Specializations for integers <a id="atomics.types.int">[[atomics.types.int]]</a>
741
+
742
+ There are specializations of the `atomic` template for the integral
743
+ types `char`, `signed char`, `unsigned char`, `short`, `unsigned short`,
744
+ `int`, `unsigned int`, `long`, `unsigned long`, `long long`,
745
+ `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`, and any other
746
+ types needed by the typedefs in the header `<cstdint>`. For each such
747
+ integral type `integral`, the specialization `atomic<integral>` provides
748
+ additional atomic operations appropriate to integral types.
749
+
750
+ [*Note 1*: For the specialization `atomic<bool>`, see
751
+ [[atomics.types.generic]]. — *end note*]
752
+
753
+ ``` cpp
754
+ namespace std {
755
  template <> struct atomic<integral> {
756
+ using value_type = integral;
757
+ using difference_type = value_type;
758
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
759
  bool is_lock_free() const volatile noexcept;
760
  bool is_lock_free() const noexcept;
761
  void store(integral, memory_order = memory_order_seq_cst) volatile noexcept;
762
  void store(integral, memory_order = memory_order_seq_cst) noexcept;
763
  integral load(memory_order = memory_order_seq_cst) const volatile noexcept;
764
  integral load(memory_order = memory_order_seq_cst) const noexcept;
765
  operator integral() const volatile noexcept;
766
  operator integral() const noexcept;
767
  integral exchange(integral, memory_order = memory_order_seq_cst) volatile noexcept;
768
  integral exchange(integral, memory_order = memory_order_seq_cst) noexcept;
769
+ bool compare_exchange_weak(integral&, integral,
770
+ memory_order, memory_order) volatile noexcept;
771
+ bool compare_exchange_weak(integral&, integral,
772
+ memory_order, memory_order) noexcept;
773
+ bool compare_exchange_strong(integral&, integral,
774
+ memory_order, memory_order) volatile noexcept;
775
+ bool compare_exchange_strong(integral&, integral,
776
+ memory_order, memory_order) noexcept;
777
+ bool compare_exchange_weak(integral&, integral,
778
+ memory_order = memory_order_seq_cst) volatile noexcept;
779
+ bool compare_exchange_weak(integral&, integral,
780
+ memory_order = memory_order_seq_cst) noexcept;
781
+ bool compare_exchange_strong(integral&, integral,
782
+ memory_order = memory_order_seq_cst) volatile noexcept;
783
+ bool compare_exchange_strong(integral&, integral,
784
+ memory_order = memory_order_seq_cst) noexcept;
785
  integral fetch_add(integral, memory_order = memory_order_seq_cst) volatile noexcept;
786
  integral fetch_add(integral, memory_order = memory_order_seq_cst) noexcept;
787
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) volatile noexcept;
788
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) noexcept;
789
  integral fetch_and(integral, memory_order = memory_order_seq_cst) volatile noexcept;
 
818
  integral operator|=(integral) volatile noexcept;
819
  integral operator|=(integral) noexcept;
820
  integral operator^=(integral) volatile noexcept;
821
  integral operator^=(integral) noexcept;
822
  };
823
+ }
824
+ ```
825
 
826
+ The atomic integral specializations are standard-layout structs. They
827
+ each have a trivial default constructor and a trivial destructor.
828
+
829
+ Descriptions are provided below only for members that differ from the
830
+ primary template.
831
+
832
+ The following operations perform arithmetic computations. The key,
833
+ operator, and computation correspondence is:
834
+
835
+ **Table: Atomic arithmetic computations** <a id="tab:atomic.arithmetic.computations">[tab:atomic.arithmetic.computations]</a>
836
+
837
+ | | | | | | |
838
+ | ----- | --- | -------------------- | ----- | --- | -------------------- |
839
+ | `add` | `+` | addition | `sub` | `-` | subtraction |
840
+ | `or` | `\|` | bitwise inclusive or | `xor` | `^` | bitwise exclusive or |
841
+ | `and` | `&` | bitwise and | | | |
842
+
843
+ ``` cpp
844
+ T fetch_key(T operand, memory_order order = memory_order_seq_cst) volatile noexcept;
845
+ T fetch_key(T operand, memory_order order = memory_order_seq_cst) noexcept;
846
+ ```
847
+
848
+ *Effects:* Atomically replaces the value pointed to by `this` with the
849
+ result of the computation applied to the value pointed to by `this` and
850
+ the given `operand`. Memory is affected according to the value of
851
+ `order`. These operations are atomic read-modify-write
852
+ operations ([[intro.multithread]]).
853
+
854
+ *Returns:* Atomically, the value pointed to by `this` immediately before
855
+ the effects.
856
+
857
+ *Remarks:* For signed integer types, arithmetic is defined to use two’s
858
+ complement representation. There are no undefined results.
859
+
860
+ ``` cpp
861
+ T operator op=(T operand) volatile noexcept;
862
+ T operator op=(T operand) noexcept;
863
+ ```
864
+
865
+ *Effects:* Equivalent to:
866
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
867
+
868
+ ### Partial specialization for pointers <a id="atomics.types.pointer">[[atomics.types.pointer]]</a>
869
+
870
+ ``` cpp
871
+ namespace std {
872
  template <class T> struct atomic<T*> {
873
+ using value_type = T*;
874
+ using difference_type = ptrdiff_t;
875
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
876
  bool is_lock_free() const volatile noexcept;
877
  bool is_lock_free() const noexcept;
878
  void store(T*, memory_order = memory_order_seq_cst) volatile noexcept;
879
  void store(T*, memory_order = memory_order_seq_cst) noexcept;
880
  T* load(memory_order = memory_order_seq_cst) const volatile noexcept;
 
918
  T* operator-=(ptrdiff_t) noexcept;
919
  };
920
  }
921
  ```
922
 
923
+ There is a partial specialization of the `atomic` class template for
924
+ pointers. Specializations of this partial specialization are
925
+ standard-layout structs. They each have a trivial default constructor
926
+ and a trivial destructor.
927
+
928
+ Descriptions are provided below only for members that differ from the
929
+ primary template.
930
+
931
+ The following operations perform pointer arithmetic. The key, operator,
932
+ and computation correspondence is:
933
+
934
+ **Table: Atomic pointer computations** <a id="tab:atomic.pointer.computations">[tab:atomic.pointer.computations]</a>
935
+
936
+ | | | | | | |
937
+ | ----- | --- | -------- | ----- | --- | ----------- |
938
+ | `add` | `+` | addition | `sub` | `-` | subtraction |
939
+
940
+ ``` cpp
941
+ T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) volatile noexcept;
942
+ T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) noexcept;
943
+ ```
944
+
945
+ *Requires:* `T` shall be an object type, otherwise the program is
946
+ ill-formed.
947
+
948
+ [*Note 1*: Pointer arithmetic on `void*` or function pointers is
949
+ ill-formed. — *end note*]
950
+
951
+ *Effects:* Atomically replaces the value pointed to by `this` with the
952
+ result of the computation applied to the value pointed to by `this` and
953
+ the given `operand`. Memory is affected according to the value of
954
+ `order`. These operations are atomic read-modify-write
955
+ operations ([[intro.multithread]]).
956
+
957
+ *Returns:* Atomically, the value pointed to by `this` immediately before
958
+ the effects.
959
+
960
+ *Remarks:* The result may be an undefined address, but the operations
961
+ otherwise have no undefined behavior.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
962
 
963
  ``` cpp
964
+ T* operator op=(ptrdiff_t operand) volatile noexcept;
965
+ T* operator op=(ptrdiff_t operand) noexcept;
966
  ```
967
 
968
+ *Effects:* Equivalent to:
969
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
970
+
971
+ ### Member operators common to integers and pointers to objects <a id="atomics.types.memop">[[atomics.types.memop]]</a>
972
 
973
  ``` cpp
974
+ T operator++(int) volatile noexcept;
975
+ T operator++(int) noexcept;
976
  ```
977
 
978
+ *Effects:* Equivalent to: `return fetch_add(1);`
 
 
 
 
 
 
 
979
 
980
  ``` cpp
981
+ T operator--(int) volatile noexcept;
982
+ T operator--(int) noexcept;
983
  ```
984
 
985
+ *Effects:* Equivalent to: `return fetch_sub(1);`
 
 
 
 
986
 
987
  ``` cpp
988
+ T operator++() volatile noexcept;
989
+ T operator++() noexcept;
990
  ```
991
 
992
+ *Effects:* Equivalent to: `return fetch_add(1) + 1;`
993
+
994
  ``` cpp
995
+ T operator--() volatile noexcept;
996
+ T operator--() noexcept;
 
 
997
  ```
998
 
999
+ *Effects:* Equivalent to: `return fetch_sub(1) - 1;`
1000
+
1001
+ ## Non-member functions <a id="atomics.nonmembers">[[atomics.nonmembers]]</a>
1002
+
1003
+ A non-member function template whose name matches the pattern `atomic_f`
1004
+ or the pattern `atomic_f_explicit` invokes the member function `f`, with
1005
+ the value of the first parameter as the object expression and the values
1006
+ of the remaining parameters (if any) as the arguments of the member
1007
+ function call, in order. An argument for a parameter of type
1008
+ `atomic<T>::value_type*` is dereferenced when passed to the member
1009
+ function call. If no such member function exists, the program is
1010
+ ill-formed.
1011
 
1012
  ``` cpp
1013
+ template<class T>
1014
+ void atomic_init(volatile atomic<T>* object, typename atomic<T>::value_type desired) noexcept;
1015
+ template<class T>
1016
+ void atomic_init(atomic<T>* object, typename atomic<T>::value_type desired) noexcept;
1017
  ```
1018
 
1019
  *Effects:* Non-atomically initializes `*object` with value `desired`.
1020
  This function shall only be applied to objects that have been default
1021
+ constructed, and then only once.
 
 
1022
 
1023
+ [*Note 1*: These semantics ensure compatibility with C. — *end note*]
 
 
 
 
 
 
 
1024
 
1025
+ [*Note 2*: Concurrent access from another thread, even via an atomic
1026
+ operation, constitutes a data race. — *end note*]
1027
 
1028
+ [*Note 3*: The non-member functions enable programmers to write code
1029
+ that can be compiled as either C or C++, for example in a shared header
1030
+ file. — *end note*]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1031
 
1032
  ## Flag type and operations <a id="atomics.flag">[[atomics.flag]]</a>
1033
 
1034
  ``` cpp
1035
  namespace std {
1036
+ struct atomic_flag {
1037
  bool test_and_set(memory_order = memory_order_seq_cst) volatile noexcept;
1038
  bool test_and_set(memory_order = memory_order_seq_cst) noexcept;
1039
  void clear(memory_order = memory_order_seq_cst) volatile noexcept;
1040
  void clear(memory_order = memory_order_seq_cst) noexcept;
1041
 
1042
  atomic_flag() noexcept = default;
1043
  atomic_flag(const atomic_flag&) = delete;
1044
  atomic_flag& operator=(const atomic_flag&) = delete;
1045
  atomic_flag& operator=(const atomic_flag&) volatile = delete;
1046
+ };
1047
 
1048
  bool atomic_flag_test_and_set(volatile atomic_flag*) noexcept;
1049
  bool atomic_flag_test_and_set(atomic_flag*) noexcept;
1050
  bool atomic_flag_test_and_set_explicit(volatile atomic_flag*, memory_order) noexcept;
1051
  bool atomic_flag_test_and_set_explicit(atomic_flag*, memory_order) noexcept;
 
1059
  ```
1060
 
1061
  The `atomic_flag` type provides the classic test-and-set functionality.
1062
  It has two states, set and clear.
1063
 
1064
+ Operations on an object of type `atomic_flag` shall be lock-free.
 
 
 
 
 
1065
 
1066
+ [*Note 1*: Hence the operations should also be
1067
+ address-free. — *end note*]
1068
+
1069
+ The `atomic_flag` type is a standard-layout struct. It has a trivial
1070
+ default constructor and a trivial destructor.
1071
 
1072
  The macro `ATOMIC_FLAG_INIT` shall be defined in such a way that it can
1073
  be used to initialize an object of type `atomic_flag` to the clear
1074
  state. The macro can be used in the form:
1075
 
 
1091
  bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) volatile noexcept;
1092
  bool atomic_flag::test_and_set(memory_order order = memory_order_seq_cst) noexcept;
1093
  ```
1094
 
1095
  *Effects:* Atomically sets the value pointed to by `object` or by `this`
1096
+ to `true`. Memory is affected according to the value of `order`. These
1097
  operations are atomic read-modify-write
1098
  operations ([[intro.multithread]]).
1099
 
1100
  *Returns:* Atomically, the value of the object immediately before the
1101
  effects.
 
1111
 
1112
  *Requires:* The `order` argument shall not be `memory_order_consume`,
1113
  `memory_order_acquire`, nor `memory_order_acq_rel`.
1114
 
1115
  *Effects:* Atomically sets the value pointed to by `object` or by `this`
1116
+ to `false`. Memory is affected according to the value of `order`.
1117
 
1118
  ## Fences <a id="atomics.fences">[[atomics.fences]]</a>
1119
 
1120
  This section introduces synchronization primitives called *fences*.
1121
  Fences can have acquire semantics, release semantics, or both. A fence
 
1144
 
1145
  ``` cpp
1146
  extern "C" void atomic_thread_fence(memory_order order) noexcept;
1147
  ```
1148
 
1149
+ *Effects:* Depending on the value of `order`, this operation:
1150
 
1151
  - has no effects, if `order == memory_order_relaxed`;
1152
  - is an acquire fence, if
1153
  `order == memory_order_acquire || order == memory_order_consume`;
1154
  - is a release fence, if `order == memory_order_release`;
 
1163
 
1164
  *Effects:* Equivalent to `atomic_thread_fence(order)`, except that the
1165
  resulting ordering constraints are established only between a thread and
1166
  a signal handler executed in the same thread.
1167
 
1168
+ [*Note 1*: `atomic_signal_fence` can be used to specify the order in
1169
+ which actions performed by the thread become visible to the signal
1170
+ handler. Compiler optimizations and reorderings of loads and stores are
 
1171
  inhibited in the same way as with `atomic_thread_fence`, but the
1172
  hardware fence instructions that `atomic_thread_fence` would have
1173
+ inserted are not emitted. — *end note*]
1174
 
1175
  <!-- Link reference definitions -->
1176
  [atomics]: #atomics
1177
+ [atomics.alias]: #atomics.alias
1178
  [atomics.fences]: #atomics.fences
1179
  [atomics.flag]: #atomics.flag
1180
  [atomics.general]: #atomics.general
1181
  [atomics.lockfree]: #atomics.lockfree
1182
+ [atomics.nonmembers]: #atomics.nonmembers
1183
  [atomics.order]: #atomics.order
1184
  [atomics.syn]: #atomics.syn
1185
  [atomics.types.generic]: #atomics.types.generic
1186
+ [atomics.types.int]: #atomics.types.int
1187
+ [atomics.types.memop]: #atomics.types.memop
1188
  [atomics.types.operations]: #atomics.types.operations
1189
+ [atomics.types.pointer]: #atomics.types.pointer
 
 
 
 
1190
  [basic.types]: basic.md#basic.types
1191
  [intro.multithread]: intro.md#intro.multithread
1192
+ [intro.progress]: intro.md#intro.progress