From Jason Turner

[util.smartptr.atomic]

Diff to HTML by rtfpessoa

Files changed (1) hide show
  1. tmp/tmpfgf7ckqd/{from.md → to.md} +468 -0
tmp/tmpfgf7ckqd/{from.md → to.md} RENAMED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Partial specializations for smart pointers <a id="util.smartptr.atomic">[[util.smartptr.atomic]]</a>
2
+
3
+ The library provides partial specializations of the `atomic` template
4
+ for shared-ownership smart pointers [[smartptr]]. The behavior of all
5
+ operations is as specified in [[atomics.types.generic]], unless
6
+ specified otherwise. The template parameter `T` of these partial
7
+ specializations may be an incomplete type.
8
+
9
+ All changes to an atomic smart pointer in this subclause, and all
10
+ associated `use_count` increments, are guaranteed to be performed
11
+ atomically. Associated `use_count` decrements are sequenced after the
12
+ atomic operation, but are not required to be part of it. Any associated
13
+ deletion and deallocation are sequenced after the atomic update step and
14
+ are not part of the atomic operation.
15
+
16
+ [*Note 1*: If the atomic operation uses locks, locks acquired by the
17
+ implementation will be held when any `use_count` adjustments are
18
+ performed, and will not be held when any destruction or deallocation
19
+ resulting from this is performed. — *end note*]
20
+
21
+ [*Example 1*:
22
+
23
+ ``` cpp
24
+ template<typename T> class atomic_list {
25
+ struct node {
26
+ T t;
27
+ shared_ptr<node> next;
28
+ };
29
+ atomic<shared_ptr<node>> head;
30
+
31
+ public:
32
+ auto find(T t) const {
33
+ auto p = head.load();
34
+ while (p && p->t != t)
35
+ p = p->next;
36
+
37
+ return shared_ptr<node>(move(p));
38
+ }
39
+
40
+ void push_front(T t) {
41
+ auto p = make_shared<node>();
42
+ p->t = t;
43
+ p->next = head;
44
+ while (!head.compare_exchange_weak(p->next, p)) {}
45
+ }
46
+ };
47
+ ```
48
+
49
+ — *end example*]
50
+
51
+ #### Partial specialization for `shared_ptr` <a id="util.smartptr.atomic.shared">[[util.smartptr.atomic.shared]]</a>
52
+
53
+ ``` cpp
54
+ namespace std {
55
+ template<class T> struct atomic<shared_ptr<T>> {
56
+ using value_type = shared_ptr<T>;
57
+
58
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock-free
59
+ bool is_lock_free() const noexcept;
60
+
61
+ constexpr atomic() noexcept;
62
+ atomic(shared_ptr<T> desired) noexcept;
63
+ atomic(const atomic&) = delete;
64
+ void operator=(const atomic&) = delete;
65
+
66
+ shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
67
+ operator shared_ptr<T>() const noexcept;
68
+ void store(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
69
+ void operator=(shared_ptr<T> desired) noexcept;
70
+
71
+ shared_ptr<T> exchange(shared_ptr<T> desired,
72
+ memory_order order = memory_order::seq_cst) noexcept;
73
+ bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
74
+ memory_order success, memory_order failure) noexcept;
75
+ bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
76
+ memory_order success, memory_order failure) noexcept;
77
+ bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
78
+ memory_order order = memory_order::seq_cst) noexcept;
79
+ bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
80
+ memory_order order = memory_order::seq_cst) noexcept;
81
+
82
+ void wait(shared_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
83
+ void notify_one() noexcept;
84
+ void notify_all() noexcept;
85
+
86
+ private:
87
+ shared_ptr<T> p; // exposition only
88
+ };
89
+ }
90
+ ```
91
+
92
+ ``` cpp
93
+ constexpr atomic() noexcept;
94
+ ```
95
+
96
+ *Effects:* Initializes `p{}`.
97
+
98
+ ``` cpp
99
+ atomic(shared_ptr<T> desired) noexcept;
100
+ ```
101
+
102
+ *Effects:* Initializes the object with the value `desired`.
103
+ Initialization is not an atomic operation [[intro.multithread]].
104
+
105
+ [*Note 1*: It is possible to have an access to an atomic object `A`
106
+ race with its construction, for example, by communicating the address of
107
+ the just-constructed object `A` to another thread via
108
+ `memory_order::relaxed` operations on a suitable atomic pointer
109
+ variable, and then immediately accessing `A` in the receiving thread.
110
+ This results in undefined behavior. — *end note*]
111
+
112
+ ``` cpp
113
+ void store(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
114
+ ```
115
+
116
+ *Preconditions:* `order` is neither `memory_order::consume`,
117
+ `memory_order::acquire`, nor `memory_order::acq_rel`.
118
+
119
+ *Effects:* Atomically replaces the value pointed to by `this` with the
120
+ value of `desired` as if by `p.swap(desired)`. Memory is affected
121
+ according to the value of `order`.
122
+
123
+ ``` cpp
124
+ void operator=(shared_ptr<T> desired) noexcept;
125
+ ```
126
+
127
+ *Effects:* Equivalent to `store(desired)`.
128
+
129
+ ``` cpp
130
+ shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
131
+ ```
132
+
133
+ *Preconditions:* `order` is neither `memory_order::release` nor
134
+ `memory_order::acq_rel`.
135
+
136
+ *Effects:* Memory is affected according to the value of `order`.
137
+
138
+ *Returns:* Atomically returns `p`.
139
+
140
+ ``` cpp
141
+ operator shared_ptr<T>() const noexcept;
142
+ ```
143
+
144
+ *Effects:* Equivalent to: `return load();`
145
+
146
+ ``` cpp
147
+ shared_ptr<T> exchange(shared_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
148
+ ```
149
+
150
+ *Effects:* Atomically replaces `p` with `desired` as if by
151
+ `p.swap(desired)`. Memory is affected according to the value of `order`.
152
+ This is an atomic read-modify-write operation [[intro.races]].
153
+
154
+ *Returns:* Atomically returns the value of `p` immediately before the
155
+ effects.
156
+
157
+ ``` cpp
158
+ bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
159
+ memory_order success, memory_order failure) noexcept;
160
+ bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
161
+ memory_order success, memory_order failure) noexcept;
162
+ ```
163
+
164
+ *Preconditions:* `failure` is neither `memory_order::release` nor
165
+ `memory_order::acq_rel`.
166
+
167
+ *Effects:* If `p` is equivalent to `expected`, assigns `desired` to `p`
168
+ and has synchronization semantics corresponding to the value of
169
+ `success`, otherwise assigns `p` to `expected` and has synchronization
170
+ semantics corresponding to the value of `failure`.
171
+
172
+ *Returns:* `true` if `p` was equivalent to `expected`, `false`
173
+ otherwise.
174
+
175
+ *Remarks:* Two `shared_ptr` objects are equivalent if they store the
176
+ same pointer value and either share ownership or are both empty. The
177
+ weak form may fail spuriously. See [[atomics.types.operations]].
178
+
179
+ If the operation returns `true`, `expected` is not accessed after the
180
+ atomic update and the operation is an atomic read-modify-write
181
+ operation [[intro.multithread]] on the memory pointed to by `this`.
182
+ Otherwise, the operation is an atomic load operation on that memory, and
183
+ `expected` is updated with the existing value read from the atomic
184
+ object in the attempted atomic update. The `use_count` update
185
+ corresponding to the write to `expected` is part of the atomic
186
+ operation. The write to `expected` itself is not required to be part of
187
+ the atomic operation.
188
+
189
+ ``` cpp
190
+ bool compare_exchange_weak(shared_ptr<T>& expected, shared_ptr<T> desired,
191
+ memory_order order = memory_order::seq_cst) noexcept;
192
+ ```
193
+
194
+ *Effects:* Equivalent to:
195
+
196
+ ``` cpp
197
+ return compare_exchange_weak(expected, desired, order, fail_order);
198
+ ```
199
+
200
+ where `fail_order` is the same as `order` except that a value of
201
+ `memory_order::acq_rel` shall be replaced by the value
202
+ `memory_order::acquire` and a value of `memory_order::release` shall be
203
+ replaced by the value `memory_order::relaxed`.
204
+
205
+ ``` cpp
206
+ bool compare_exchange_strong(shared_ptr<T>& expected, shared_ptr<T> desired,
207
+ memory_order order = memory_order::seq_cst) noexcept;
208
+ ```
209
+
210
+ *Effects:* Equivalent to:
211
+
212
+ ``` cpp
213
+ return compare_exchange_strong(expected, desired, order, fail_order);
214
+ ```
215
+
216
+ where `fail_order` is the same as `order` except that a value of
217
+ `memory_order::acq_rel` shall be replaced by the value
218
+ `memory_order::acquire` and a value of `memory_order::release` shall be
219
+ replaced by the value `memory_order::relaxed`.
220
+
221
+ ``` cpp
222
+ void wait(shared_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
223
+ ```
224
+
225
+ *Preconditions:* `order` is neither `memory_order::release` nor
226
+ `memory_order::acq_rel`.
227
+
228
+ *Effects:* Repeatedly performs the following steps, in order:
229
+
230
+ - Evaluates `load(order)` and compares it to `old`.
231
+ - If the two are not equivalent, returns.
232
+ - Blocks until it is unblocked by an atomic notifying operation or is
233
+ unblocked spuriously.
234
+
235
+ *Remarks:* Two `shared_ptr` objects are equivalent if they store the
236
+ same pointer and either share ownership or are both empty. This function
237
+ is an atomic waiting operation [[atomics.wait]].
238
+
239
+ ``` cpp
240
+ void notify_one() noexcept;
241
+ ```
242
+
243
+ *Effects:* Unblocks the execution of at least one atomic waiting
244
+ operation that is eligible to be unblocked [[atomics.wait]] by this
245
+ call, if any such atomic waiting operations exist.
246
+
247
+ *Remarks:* This function is an atomic notifying
248
+ operation [[atomics.wait]].
249
+
250
+ ``` cpp
251
+ void notify_all() noexcept;
252
+ ```
253
+
254
+ *Effects:* Unblocks the execution of all atomic waiting operations that
255
+ are eligible to be unblocked [[atomics.wait]] by this call.
256
+
257
+ *Remarks:* This function is an atomic notifying
258
+ operation [[atomics.wait]].
259
+
260
+ #### Partial specialization for `weak_ptr` <a id="util.smartptr.atomic.weak">[[util.smartptr.atomic.weak]]</a>
261
+
262
+ ``` cpp
263
+ namespace std {
264
+ template<class T> struct atomic<weak_ptr<T>> {
265
+ using value_type = weak_ptr<T>;
266
+
267
+ static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock-free
268
+ bool is_lock_free() const noexcept;
269
+
270
+ constexpr atomic() noexcept;
271
+ atomic(weak_ptr<T> desired) noexcept;
272
+ atomic(const atomic&) = delete;
273
+ void operator=(const atomic&) = delete;
274
+
275
+ weak_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
276
+ operator weak_ptr<T>() const noexcept;
277
+ void store(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
278
+ void operator=(weak_ptr<T> desired) noexcept;
279
+
280
+ weak_ptr<T> exchange(weak_ptr<T> desired,
281
+ memory_order order = memory_order::seq_cst) noexcept;
282
+ bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
283
+ memory_order success, memory_order failure) noexcept;
284
+ bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
285
+ memory_order success, memory_order failure) noexcept;
286
+ bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
287
+ memory_order order = memory_order::seq_cst) noexcept;
288
+ bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
289
+ memory_order order = memory_order::seq_cst) noexcept;
290
+
291
+ void wait(weak_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
292
+ void notify_one() noexcept;
293
+ void notify_all() noexcept;
294
+
295
+ private:
296
+ weak_ptr<T> p; // exposition only
297
+ };
298
+ }
299
+ ```
300
+
301
+ ``` cpp
302
+ constexpr atomic() noexcept;
303
+ ```
304
+
305
+ *Effects:* Initializes `p{}`.
306
+
307
+ ``` cpp
308
+ atomic(weak_ptr<T> desired) noexcept;
309
+ ```
310
+
311
+ *Effects:* Initializes the object with the value `desired`.
312
+ Initialization is not an atomic operation [[intro.multithread]].
313
+
314
+ [*Note 1*: It is possible to have an access to an atomic object `A`
315
+ race with its construction, for example, by communicating the address of
316
+ the just-constructed object `A` to another thread via
317
+ `memory_order::relaxed` operations on a suitable atomic pointer
318
+ variable, and then immediately accessing `A` in the receiving thread.
319
+ This results in undefined behavior. — *end note*]
320
+
321
+ ``` cpp
322
+ void store(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
323
+ ```
324
+
325
+ *Preconditions:* `order` is neither `memory_order::consume`,
326
+ `memory_order::acquire`, nor `memory_order::acq_rel`.
327
+
328
+ *Effects:* Atomically replaces the value pointed to by `this` with the
329
+ value of `desired` as if by `p.swap(desired)`. Memory is affected
330
+ according to the value of `order`.
331
+
332
+ ``` cpp
333
+ void operator=(weak_ptr<T> desired) noexcept;
334
+ ```
335
+
336
+ *Effects:* Equivalent to `store(desired)`.
337
+
338
+ ``` cpp
339
+ weak_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
340
+ ```
341
+
342
+ *Preconditions:* `order` is neither `memory_order::release` nor
343
+ `memory_order::acq_rel`.
344
+
345
+ *Effects:* Memory is affected according to the value of `order`.
346
+
347
+ *Returns:* Atomically returns `p`.
348
+
349
+ ``` cpp
350
+ operator weak_ptr<T>() const noexcept;
351
+ ```
352
+
353
+ *Effects:* Equivalent to: `return load();`
354
+
355
+ ``` cpp
356
+ weak_ptr<T> exchange(weak_ptr<T> desired, memory_order order = memory_order::seq_cst) noexcept;
357
+ ```
358
+
359
+ *Effects:* Atomically replaces `p` with `desired` as if by
360
+ `p.swap(desired)`. Memory is affected according to the value of `order`.
361
+ This is an atomic read-modify-write operation [[intro.races]].
362
+
363
+ *Returns:* Atomically returns the value of `p` immediately before the
364
+ effects.
365
+
366
+ ``` cpp
367
+ bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
368
+ memory_order success, memory_order failure) noexcept;
369
+ bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
370
+ memory_order success, memory_order failure) noexcept;
371
+ ```
372
+
373
+ *Preconditions:* `failure` is neither `memory_order::release` nor
374
+ `memory_order::acq_rel`.
375
+
376
+ *Effects:* If `p` is equivalent to `expected`, assigns `desired` to `p`
377
+ and has synchronization semantics corresponding to the value of
378
+ `success`, otherwise assigns `p` to `expected` and has synchronization
379
+ semantics corresponding to the value of `failure`.
380
+
381
+ *Returns:* `true` if `p` was equivalent to `expected`, `false`
382
+ otherwise.
383
+
384
+ *Remarks:* Two `weak_ptr` objects are equivalent if they store the same
385
+ pointer value and either share ownership or are both empty. The weak
386
+ form may fail spuriously. See [[atomics.types.operations]].
387
+
388
+ If the operation returns `true`, `expected` is not accessed after the
389
+ atomic update and the operation is an atomic read-modify-write
390
+ operation [[intro.multithread]] on the memory pointed to by `this`.
391
+ Otherwise, the operation is an atomic load operation on that memory, and
392
+ `expected` is updated with the existing value read from the atomic
393
+ object in the attempted atomic update. The `use_count` update
394
+ corresponding to the write to `expected` is part of the atomic
395
+ operation. The write to `expected` itself is not required to be part of
396
+ the atomic operation.
397
+
398
+ ``` cpp
399
+ bool compare_exchange_weak(weak_ptr<T>& expected, weak_ptr<T> desired,
400
+ memory_order order = memory_order::seq_cst) noexcept;
401
+ ```
402
+
403
+ *Effects:* Equivalent to:
404
+
405
+ ``` cpp
406
+ return compare_exchange_weak(expected, desired, order, fail_order);
407
+ ```
408
+
409
+ where `fail_order` is the same as `order` except that a value of
410
+ `memory_order::acq_rel` shall be replaced by the value
411
+ `memory_order::acquire` and a value of `memory_order::release` shall be
412
+ replaced by the value `memory_order::relaxed`.
413
+
414
+ ``` cpp
415
+ bool compare_exchange_strong(weak_ptr<T>& expected, weak_ptr<T> desired,
416
+ memory_order order = memory_order::seq_cst) noexcept;
417
+ ```
418
+
419
+ *Effects:* Equivalent to:
420
+
421
+ ``` cpp
422
+ return compare_exchange_strong(expected, desired, order, fail_order);
423
+ ```
424
+
425
+ where `fail_order` is the same as `order` except that a value of
426
+ `memory_order::acq_rel` shall be replaced by the value
427
+ `memory_order::acquire` and a value of `memory_order::release` shall be
428
+ replaced by the value `memory_order::relaxed`.
429
+
430
+ ``` cpp
431
+ void wait(weak_ptr<T> old, memory_order order = memory_order::seq_cst) const noexcept;
432
+ ```
433
+
434
+ *Preconditions:* `order` is neither `memory_order::release` nor
435
+ `memory_order::acq_rel`.
436
+
437
+ *Effects:* Repeatedly performs the following steps, in order:
438
+
439
+ - Evaluates `load(order)` and compares it to `old`.
440
+ - If the two are not equivalent, returns.
441
+ - Blocks until it is unblocked by an atomic notifying operation or is
442
+ unblocked spuriously.
443
+
444
+ *Remarks:* Two `weak_ptr` objects are equivalent if they store the same
445
+ pointer and either share ownership or are both empty. This function is
446
+ an atomic waiting operation [[atomics.wait]].
447
+
448
+ ``` cpp
449
+ void notify_one() noexcept;
450
+ ```
451
+
452
+ *Effects:* Unblocks the execution of at least one atomic waiting
453
+ operation that is eligible to be unblocked [[atomics.wait]] by this
454
+ call, if any such atomic waiting operations exist.
455
+
456
+ *Remarks:* This function is an atomic notifying
457
+ operation [[atomics.wait]].
458
+
459
+ ``` cpp
460
+ void notify_all() noexcept;
461
+ ```
462
+
463
+ *Effects:* Unblocks the execution of all atomic waiting operations that
464
+ are eligible to be unblocked [[atomics.wait]] by this call.
465
+
466
+ *Remarks:* This function is an atomic notifying
467
+ operation [[atomics.wait]].
468
+