From Jason Turner

[atomics.types.generic]

Files changed (1)

  1. tmp/tmpt16noogu/{from.md → to.md} (renamed): +420 -57

@@ -1,10 +1,12 @@
- ## Atomic types <a id="atomics.types.generic">[[atomics.types.generic]]</a>

  ``` cpp
  namespace std {
  template <class T> struct atomic {
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(T, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(T, memory_order = memory_order_seq_cst) noexcept;
  T load(memory_order = memory_order_seq_cst) const volatile noexcept;
@@ -28,30 +30,311 @@ namespace std {
  atomic& operator=(const atomic&) = delete;
  atomic& operator=(const atomic&) volatile = delete;
  T operator=(T) volatile noexcept;
  T operator=(T) noexcept;
  };

  template <> struct atomic<integral> {
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(integral, memory_order = memory_order_seq_cst) noexcept;
  integral load(memory_order = memory_order_seq_cst) const volatile noexcept;
  integral load(memory_order = memory_order_seq_cst) const noexcept;
  operator integral() const volatile noexcept;
  operator integral() const noexcept;
  integral exchange(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral exchange(integral, memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_weak(integral&, integral, memory_order, memory_order) volatile noexcept;
- bool compare_exchange_weak(integral&, integral, memory_order, memory_order) noexcept;
- bool compare_exchange_strong(integral&, integral, memory_order, memory_order) volatile noexcept;
- bool compare_exchange_strong(integral&, integral, memory_order, memory_order) noexcept;
- bool compare_exchange_weak(integral&, integral, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_weak(integral&, integral, memory_order = memory_order_seq_cst) noexcept;
- bool compare_exchange_strong(integral&, integral, memory_order = memory_order_seq_cst) volatile noexcept;
- bool compare_exchange_strong(integral&, integral, memory_order = memory_order_seq_cst) noexcept;
  integral fetch_add(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral fetch_add(integral, memory_order = memory_order_seq_cst) noexcept;
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) noexcept;
  integral fetch_and(integral, memory_order = memory_order_seq_cst) volatile noexcept;
@@ -86,12 +369,63 @@ namespace std {
  integral operator|=(integral) volatile noexcept;
  integral operator|=(integral) noexcept;
  integral operator^=(integral) volatile noexcept;
  integral operator^=(integral) noexcept;
  };

  template <class T> struct atomic<T*> {
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(T*, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(T*, memory_order = memory_order_seq_cst) noexcept;
  T* load(memory_order = memory_order_seq_cst) const volatile noexcept;
@@ -135,54 +469,83 @@ namespace std {
  T* operator-=(ptrdiff_t) noexcept;
  };
  }
  ```

- There is a generic class template `atomic<T>`. The type of the template
- argument `T` shall be trivially copyable ([[basic.types]]). Type
- arguments that are not also statically initializable may be difficult to
- use.
-
- The semantics of the operations on specializations of `atomic` are
- defined in [[atomics.types.operations]].
-
- Specializations and instantiations of the `atomic` template shall have a
- deleted copy constructor, a deleted copy assignment operator, and a
- constexpr value constructor.
-
- There shall be explicit specializations of the `atomic` template for the
- integral types `char`, `signed char`, `unsigned char`, `short`,
- `unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
- `long long`, `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`,
- and any other types needed by the typedefs in the header `<cstdint>`.
- For each integral type *integral*, the specialization `atomic<integral>`
- provides additional atomic operations appropriate to integral types.
- There shall be a specialization `atomic<bool>` which provides the
- general atomic operations as specified in
- [[atomics.types.operations.general]].
-
- The atomic integral specializations and the specialization
- `atomic<bool>` shall have standard layout. They shall each have a
- trivial default constructor and a trivial destructor. They shall each
- support aggregate initialization syntax.
-
- There shall be pointer partial specializations of the `atomic` class
- template. These specializations shall have standard layout, trivial
- default constructors, and trivial destructors. They shall each support
- aggregate initialization syntax.
-
- There shall be named types corresponding to the integral specializations
- of `atomic`, as specified in Table [[tab:atomics.integral]], and a
- named type `atomic_bool` corresponding to the specified `atomic<bool>`.
- Each named type is either a typedef to the corresponding specialization
- or a base class of the corresponding specialization. If it is a base
- class, it shall support the same member functions as the corresponding
- specialization.
-
- There shall be atomic typedefs corresponding to the typedefs in the
- header `<inttypes.h>` as specified in Table [[tab:atomics.typedefs]].
-
- The representation of an atomic specialization need not have the same
- size as its corresponding argument type. Specializations should have the
- same size whenever possible, as this reduces the effort required to port
- existing code.

+ ## Class template `atomic` <a id="atomics.types.generic">[[atomics.types.generic]]</a>

  ``` cpp
  namespace std {
  template <class T> struct atomic {
+ using value_type = T;
+ static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(T, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(T, memory_order = memory_order_seq_cst) noexcept;
  T load(memory_order = memory_order_seq_cst) const volatile noexcept;

  atomic& operator=(const atomic&) = delete;
  atomic& operator=(const atomic&) volatile = delete;
  T operator=(T) volatile noexcept;
  T operator=(T) noexcept;
  };
+ }
+ ```

+ The template argument for `T` shall be trivially copyable ([[basic.types]]).
+
+ [*Note 1*: Type arguments that are not also statically initializable
+ may be difficult to use. — *end note*]
+
+ The specialization `atomic<bool>` is a standard-layout struct.
+
+ [*Note 2*: The representation of an atomic specialization need not have
+ the same size as its corresponding argument type. Specializations should
+ have the same size whenever possible, as this reduces the effort
+ required to port existing code. — *end note*]
+
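As an editorial illustration (not part of the quoted wording), the trivially-copyable requirement admits any suitable user-defined type, not just integers and pointers; `Point` below is a made-up aggregate:

``` cpp
#include <atomic>
#include <type_traits>

// Hypothetical trivially copyable aggregate used only for illustration.
struct Point { int x; int y; };
static_assert(std::is_trivially_copyable_v<Point>,
              "acceptable as the template argument of atomic");

std::atomic<Point> p{Point{1, 2}};   // OK: loads and stores copy the whole object

// std::atomic<std::string> s;       // ill-formed: std::string is not trivially copyable
```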
+ ### Operations on atomic types <a id="atomics.types.operations">[[atomics.types.operations]]</a>
+
+ [*Note 1*: Many operations are volatile-qualified. The “volatile as
+ device register” semantics have not changed in the standard. This
+ qualification means that volatility is preserved when applying these
+ operations to volatile objects. It does not mean that operations on
+ non-volatile objects become volatile. — *end note*]
+
+ ``` cpp
+ atomic() noexcept = default;
+ ```
+
+ *Effects:* Leaves the atomic object in an uninitialized state.
+
+ [*Note 2*: These semantics ensure compatibility with C. — *end note*]
+
+ ``` cpp
+ constexpr atomic(T desired) noexcept;
+ ```
+
+ *Effects:* Initializes the object with the value `desired`.
+ Initialization is not an atomic operation ([[intro.multithread]]).
+
+ [*Note 3*: It is possible to have an access to an atomic object `A`
+ race with its construction, for example by communicating the address of
+ the just-constructed object `A` to another thread via
+ `memory_order_relaxed` operations on a suitable atomic pointer variable,
+ and then immediately accessing `A` in the receiving thread. This results
+ in undefined behavior. — *end note*]
+
+ ``` cpp
+ #define ATOMIC_VAR_INIT(value) see below
+ ```
+
+ The macro expands to a token sequence suitable for constant
+ initialization of an atomic variable of static storage duration of a
+ type that is initialization-compatible with `value`.
+
+ [*Note 4*: This operation may need to initialize locks. — *end note*]
+
+ Concurrent access to the variable being initialized, even via an atomic
+ operation, constitutes a data race.
+
+ [*Example 1*:
+
+ ``` cpp
+ atomic<int> v = ATOMIC_VAR_INIT(5);
+ ```
+
+ — *end example*]
+
+ ``` cpp
+ static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
+ ```
+
+ The `static` data member `is_always_lock_free` is `true` if the atomic
+ type’s operations are always lock-free, and `false` otherwise.
+
+ [*Note 5*: The value of `is_always_lock_free` is consistent with the
+ value of the corresponding `ATOMIC_..._LOCK_FREE` macro, if
+ defined. — *end note*]
+
+ ``` cpp
+ bool is_lock_free() const volatile noexcept;
+ bool is_lock_free() const noexcept;
+ ```
+
+ *Returns:* `true` if the object’s operations are lock-free, `false`
+ otherwise.
+
+ [*Note 6*: The return value of the `is_lock_free` member function is
+ consistent with the value of `is_always_lock_free` for the same
+ type. — *end note*]
+
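A brief editorial sketch of how the two lock-freedom queries are typically used; the actual values are implementation-defined:

``` cpp
#include <atomic>
#include <cstdio>

int main() {
    // Compile-time property of the type; usable with if constexpr.
    if constexpr (std::atomic<int>::is_always_lock_free) {
        std::puts("every atomic<int> is lock-free");
    }

    // Run-time property of one particular object: may be true even when
    // is_always_lock_free is false (for example, depending on alignment).
    std::atomic<long long> x{0};
    std::printf("this atomic<long long> is %slock-free\n",
                x.is_lock_free() ? "" : "not ");
}
```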
+ ``` cpp
+ void store(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
+ void store(T desired, memory_order order = memory_order_seq_cst) noexcept;
+ ```
+
+ *Requires:* The `order` argument shall not be `memory_order_consume`,
+ `memory_order_acquire`, nor `memory_order_acq_rel`.
+
+ *Effects:* Atomically replaces the value pointed to by `this` with the
+ value of `desired`. Memory is affected according to the value of
+ `order`.
+
+ ``` cpp
+ T operator=(T desired) volatile noexcept;
+ T operator=(T desired) noexcept;
+ ```
+
+ *Effects:* Equivalent to: `store(desired)`.
+
+ *Returns:* `desired`.
+
+ ``` cpp
+ T load(memory_order order = memory_order_seq_cst) const volatile noexcept;
+ T load(memory_order order = memory_order_seq_cst) const noexcept;
+ ```
+
+ *Requires:* The `order` argument shall not be `memory_order_release` nor
+ `memory_order_acq_rel`.
+
+ *Effects:* Memory is affected according to the value of `order`.
+
+ *Returns:* Atomically returns the value pointed to by `this`.
+
+ ``` cpp
+ operator T() const volatile noexcept;
+ operator T() const noexcept;
+ ```
+
+ *Effects:* Equivalent to: `return load();`
+
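As an editorial aside, a minimal publish/consume sketch using `store` and `load` with explicit memory orders; the names `data`, `ready`, `producer`, and `consumer` are illustrative only:

``` cpp
#include <atomic>
#include <cassert>
#include <thread>

int data = 0;
std::atomic<bool> ready{false};

void producer() {
    data = 42;                                     // plain write
    ready.store(true, std::memory_order_release);  // publish the flag
}

void consumer() {
    while (!ready.load(std::memory_order_acquire)) // wait for the flag
        ;
    assert(data == 42);  // the acquire load synchronizes with the release store
}

int main() {
    std::thread t1(producer), t2(consumer);
    t1.join();
    t2.join();
}
```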
+ ``` cpp
+ T exchange(T desired, memory_order order = memory_order_seq_cst) volatile noexcept;
+ T exchange(T desired, memory_order order = memory_order_seq_cst) noexcept;
+ ```
+
+ *Effects:* Atomically replaces the value pointed to by `this` with
+ `desired`. Memory is affected according to the value of `order`. These
+ operations are atomic read-modify-write
+ operations ([[intro.multithread]]).
+
+ *Returns:* Atomically returns the value pointed to by `this` immediately
+ before the effects.
+
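A common editorial example of `exchange` as a read-modify-write primitive is a test-and-set spinlock; the class below is a sketch, not part of the quoted wording:

``` cpp
#include <atomic>

class spinlock {
    std::atomic<bool> locked{false};
public:
    void lock() {
        // exchange returns the previous value; keep spinning while it was true.
        while (locked.exchange(true, std::memory_order_acquire)) {
            // busy-wait
        }
    }
    void unlock() {
        locked.store(false, std::memory_order_release);
    }
};
```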
+ ``` cpp
+ bool compare_exchange_weak(T& expected, T desired,
+                            memory_order success, memory_order failure) volatile noexcept;
+ bool compare_exchange_weak(T& expected, T desired,
+                            memory_order success, memory_order failure) noexcept;
+ bool compare_exchange_strong(T& expected, T desired,
+                              memory_order success, memory_order failure) volatile noexcept;
+ bool compare_exchange_strong(T& expected, T desired,
+                              memory_order success, memory_order failure) noexcept;
+ bool compare_exchange_weak(T& expected, T desired,
+                            memory_order order = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_weak(T& expected, T desired,
+                            memory_order order = memory_order_seq_cst) noexcept;
+ bool compare_exchange_strong(T& expected, T desired,
+                              memory_order order = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_strong(T& expected, T desired,
+                              memory_order order = memory_order_seq_cst) noexcept;
+ ```
+
+ *Requires:* The `failure` argument shall not be `memory_order_release`
+ nor `memory_order_acq_rel`.
+
+ *Effects:* Retrieves the value in `expected`. It then atomically
+ compares the contents of the memory pointed to by `this` for equality
+ with that previously retrieved from `expected`, and if true, replaces
+ the contents of the memory pointed to by `this` with that in `desired`.
+ If and only if the comparison is true, memory is affected according to
+ the value of `success`, and if the comparison is false, memory is
+ affected according to the value of `failure`. When only one
+ `memory_order` argument is supplied, the value of `success` is `order`,
+ and the value of `failure` is `order` except that a value of
+ `memory_order_acq_rel` shall be replaced by the value
+ `memory_order_acquire` and a value of `memory_order_release` shall be
+ replaced by the value `memory_order_relaxed`. If and only if the
+ comparison is false then, after the atomic operation, the contents of
+ the memory in `expected` are replaced by the value read from the memory
+ pointed to by `this` during the atomic comparison. If the operation
+ returns `true`, these operations are atomic read-modify-write
+ operations ([[intro.multithread]]) on the memory pointed to by `this`.
+ Otherwise, these operations are atomic load operations on that memory.
+
+ *Returns:* The result of the comparison.
+
+ [*Note 7*:
+
+ For example, the effect of `compare_exchange_strong` is
+
+ ``` cpp
+ if (memcmp(this, &expected, sizeof(*this)) == 0)
+   memcpy(this, &desired, sizeof(*this));
+ else
+   memcpy(&expected, this, sizeof(*this));
+ ```
+
+ — *end note*]
+
+ [*Example 2*:
+
+ The expected use of the compare-and-exchange operations is as follows.
+ The compare-and-exchange operations will update `expected` when another
+ iteration of the loop is needed.
+
+ ``` cpp
+ expected = current.load();
+ do {
+   desired = function(expected);
+ } while (!current.compare_exchange_weak(expected, desired));
+ ```
+
+ — *end example*]
+
+ [*Example 3*:
+
+ Because the expected value is updated only on failure, code releasing
+ the memory containing the `expected` value on success will work. E.g.
+ list head insertion will act atomically and would not introduce a data
+ race in the following code:
+
+ ``` cpp
+ do {
+   p->next = head;  // make new list node point to the current head
+ } while (!head.compare_exchange_weak(p->next, p));  // try to insert
+ ```
+
+ — *end example*]
+
+ Implementations should ensure that weak compare-and-exchange operations
+ do not consistently return `false` unless either the atomic object has
+ value different from `expected` or there are concurrent modifications to
+ the atomic object.
+
+ *Remarks:* A weak compare-and-exchange operation may fail spuriously.
+ That is, even when the contents of memory referred to by `expected` and
+ `this` are equal, it may return `false` and store back to `expected` the
+ same memory contents that were originally there.
+
+ [*Note 8*: This spurious failure enables implementation of
+ compare-and-exchange on a broader class of machines, e.g., load-locked
+ store-conditional machines. A consequence of spurious failure is that
+ nearly all uses of weak compare-and-exchange will be in a loop. When a
+ compare-and-exchange is in a loop, the weak version will yield better
+ performance on some platforms. When a weak compare-and-exchange would
+ require a loop and a strong one would not, the strong one is
+ preferable. — *end note*]
+
+ [*Note 9*: The `memcpy` and `memcmp` semantics of the
+ compare-and-exchange operations may result in failed comparisons for
+ values that compare equal with `operator==` if the underlying type has
+ padding bits, trap bits, or alternate representations of the same value.
+ Thus, `compare_exchange_strong` should be used with extreme care. On the
+ other hand, `compare_exchange_weak` should converge
+ rapidly. — *end note*]
+
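To complement the examples above, here is an editorial sketch of the usual `compare_exchange_weak` loop applied to a concrete task, a bounded (saturating) increment; the function name is hypothetical:

``` cpp
#include <atomic>

// Increment `counter`, but never past `max`. Returns false if the bound
// was already reached.
bool bounded_increment(std::atomic<int>& counter, int max) {
    int expected = counter.load();
    do {
        if (expected >= max)
            return false;                 // already at the bound; give up
        // On failure (including spurious failure of the weak form),
        // expected is reloaded with the current value and the loop retries.
    } while (!counter.compare_exchange_weak(expected, expected + 1));
    return true;
}
```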
+ ### Specializations for integers <a id="atomics.types.int">[[atomics.types.int]]</a>
+
+ There are specializations of the `atomic` template for the integral
+ types `char`, `signed char`, `unsigned char`, `short`, `unsigned short`,
+ `int`, `unsigned int`, `long`, `unsigned long`, `long long`,
+ `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`, and any other
+ types needed by the typedefs in the header `<cstdint>`. For each such
+ integral type `integral`, the specialization `atomic<integral>` provides
+ additional atomic operations appropriate to integral types.
+
+ [*Note 1*: For the specialization `atomic<bool>`, see
+ [[atomics.types.generic]]. — *end note*]
+
+ ``` cpp
+ namespace std {
  template <> struct atomic<integral> {
+ using value_type = integral;
+ using difference_type = value_type;
+ static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(integral, memory_order = memory_order_seq_cst) noexcept;
  integral load(memory_order = memory_order_seq_cst) const volatile noexcept;
  integral load(memory_order = memory_order_seq_cst) const noexcept;
  operator integral() const volatile noexcept;
  operator integral() const noexcept;
  integral exchange(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral exchange(integral, memory_order = memory_order_seq_cst) noexcept;
+ bool compare_exchange_weak(integral&, integral,
+                            memory_order, memory_order) volatile noexcept;
+ bool compare_exchange_weak(integral&, integral,
+                            memory_order, memory_order) noexcept;
+ bool compare_exchange_strong(integral&, integral,
+                              memory_order, memory_order) volatile noexcept;
+ bool compare_exchange_strong(integral&, integral,
+                              memory_order, memory_order) noexcept;
+ bool compare_exchange_weak(integral&, integral,
+                            memory_order = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_weak(integral&, integral,
+                            memory_order = memory_order_seq_cst) noexcept;
+ bool compare_exchange_strong(integral&, integral,
+                              memory_order = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_strong(integral&, integral,
+                              memory_order = memory_order_seq_cst) noexcept;
  integral fetch_add(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral fetch_add(integral, memory_order = memory_order_seq_cst) noexcept;
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) volatile noexcept;
  integral fetch_sub(integral, memory_order = memory_order_seq_cst) noexcept;
  integral fetch_and(integral, memory_order = memory_order_seq_cst) volatile noexcept;

  integral operator|=(integral) volatile noexcept;
  integral operator|=(integral) noexcept;
  integral operator^=(integral) volatile noexcept;
  integral operator^=(integral) noexcept;
  };
+ }
+ ```

+ The atomic integral specializations are standard-layout structs. They
+ each have a trivial default constructor and a trivial destructor.
+
+ Descriptions are provided below only for members that differ from the
+ primary template.
+
+ The following operations perform arithmetic computations. The key,
+ operator, and computation correspondence is:
+
+ **Table: Atomic arithmetic computations** <a id="tab:atomic.arithmetic.computations">[tab:atomic.arithmetic.computations]</a>
+
+ | Key   | Op  | Computation          | Key   | Op  | Computation          |
+ | ----- | --- | -------------------- | ----- | --- | -------------------- |
+ | `add` | `+` | addition             | `sub` | `-` | subtraction          |
+ | `or`  | `\|` | bitwise inclusive or | `xor` | `^` | bitwise exclusive or |
+ | `and` | `&` | bitwise and          |       |     |                      |
+
+ ``` cpp
+ T fetch_key(T operand, memory_order order = memory_order_seq_cst) volatile noexcept;
+ T fetch_key(T operand, memory_order order = memory_order_seq_cst) noexcept;
+ ```
+
+ *Effects:* Atomically replaces the value pointed to by `this` with the
+ result of the computation applied to the value pointed to by `this` and
+ the given `operand`. Memory is affected according to the value of
+ `order`. These operations are atomic read-modify-write
+ operations ([[intro.multithread]]).
+
+ *Returns:* Atomically, the value pointed to by `this` immediately before
+ the effects.
+
+ *Remarks:* For signed integer types, arithmetic is defined to use two’s
+ complement representation. There are no undefined results.
+
+ ``` cpp
+ T operator op=(T operand) volatile noexcept;
+ T operator op=(T operand) noexcept;
+ ```
+
+ *Effects:* Equivalent to:
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
+
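An editorial sketch of the integral fetch operations and of the two's complement remark above; the values chosen are arbitrary:

``` cpp
#include <atomic>
#include <cassert>
#include <climits>

int main() {
    std::atomic<unsigned> flags{0};

    // Read-modify-write operations return the value held before the effect.
    unsigned old = flags.fetch_or(0x4);   // set bit 2
    assert(old == 0 && flags.load() == 0x4);

    flags.fetch_and(~0x4u);               // clear bit 2 again
    assert(flags.load() == 0);

    // For signed types, fetch_add/fetch_sub use two's complement wrapping;
    // there is no undefined overflow.
    std::atomic<int> n{INT_MAX};
    n.fetch_add(1);                       // wraps to INT_MIN
    assert(n.load() == INT_MIN);
}
```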
+ ### Partial specialization for pointers <a id="atomics.types.pointer">[[atomics.types.pointer]]</a>
+
+ ``` cpp
+ namespace std {
  template <class T> struct atomic<T*> {
+ using value_type = T*;
+ using difference_type = ptrdiff_t;
+ static constexpr bool is_always_lock_free = implementation-defined;  // whether a given atomic type's operations are always lock-free
  bool is_lock_free() const volatile noexcept;
  bool is_lock_free() const noexcept;
  void store(T*, memory_order = memory_order_seq_cst) volatile noexcept;
  void store(T*, memory_order = memory_order_seq_cst) noexcept;
  T* load(memory_order = memory_order_seq_cst) const volatile noexcept;

  T* operator-=(ptrdiff_t) noexcept;
  };
  }
  ```

+ There is a partial specialization of the `atomic` class template for
+ pointers. Specializations of this partial specialization are
+ standard-layout structs. They each have a trivial default constructor
+ and a trivial destructor.
+
+ Descriptions are provided below only for members that differ from the
+ primary template.
+
+ The following operations perform pointer arithmetic. The key, operator,
+ and computation correspondence is:
+
+ **Table: Atomic pointer computations** <a id="tab:atomic.pointer.computations">[tab:atomic.pointer.computations]</a>
+
+ | Key   | Op  | Computation | Key   | Op  | Computation |
+ | ----- | --- | ----------- | ----- | --- | ----------- |
+ | `add` | `+` | addition    | `sub` | `-` | subtraction |
+
+ ``` cpp
+ T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) volatile noexcept;
+ T* fetch_key(ptrdiff_t operand, memory_order order = memory_order_seq_cst) noexcept;
+ ```
+
+ *Requires:* `T` shall be an object type, otherwise the program is
+ ill-formed.
+
+ [*Note 1*: Pointer arithmetic on `void*` or function pointers is
+ ill-formed. — *end note*]
+
+ *Effects:* Atomically replaces the value pointed to by `this` with the
+ result of the computation applied to the value pointed to by `this` and
+ the given `operand`. Memory is affected according to the value of
+ `order`. These operations are atomic read-modify-write
+ operations ([[intro.multithread]]).
+
+ *Returns:* Atomically, the value pointed to by `this` immediately before
+ the effects.
+
+ *Remarks:* The result may be an undefined address, but the operations
+ otherwise have no undefined behavior.
+
+ ``` cpp
+ T* operator op=(ptrdiff_t operand) volatile noexcept;
+ T* operator op=(ptrdiff_t operand) noexcept;
+ ```
+
+ *Effects:* Equivalent to:
+ `return fetch_`*`key`*`(operand) `*`op`*` operand;`
+
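An editorial sketch of atomic pointer arithmetic, which is in units of `T` exactly as for built-in pointers; the array `buf` is illustrative only:

``` cpp
#include <atomic>
#include <cassert>

int main() {
    static int buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};

    std::atomic<int*> cursor{buf};

    int* prev = cursor.fetch_add(3);   // returns the pointer held before the effect
    assert(prev == buf);
    assert(cursor.load() == buf + 3);

    cursor -= 1;                       // operator-=: equivalent to fetch_sub(1) - 1
    assert(cursor.load() == buf + 2);
}
```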
+ ### Member operators common to integers and pointers to objects <a id="atomics.types.memop">[[atomics.types.memop]]</a>
+
+ ``` cpp
+ T operator++(int) volatile noexcept;
+ T operator++(int) noexcept;
+ ```
+
+ *Effects:* Equivalent to: `return fetch_add(1);`
+
+ ``` cpp
+ T operator--(int) volatile noexcept;
+ T operator--(int) noexcept;
+ ```
+
+ *Effects:* Equivalent to: `return fetch_sub(1);`
+
+ ``` cpp
+ T operator++() volatile noexcept;
+ T operator++() noexcept;
+ ```
+
+ *Effects:* Equivalent to: `return fetch_add(1) + 1;`
+
+ ``` cpp
+ T operator--() volatile noexcept;
+ T operator--() noexcept;
+ ```
+
+ *Effects:* Equivalent to: `return fetch_sub(1) - 1;`
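
As an editorial illustration of the postfix/prefix distinction defined above:

``` cpp
#include <atomic>
#include <cassert>

int main() {
    std::atomic<int> counter{0};

    // Postfix forms return the old value (fetch_add(1) / fetch_sub(1)).
    assert(counter++ == 0);
    // Prefix forms return the new value (fetch_add(1) + 1 / fetch_sub(1) - 1).
    assert(++counter == 2);

    assert(counter-- == 2);
    assert(--counter == 0);
}
```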