tmp/tmpunh6wn27/{from.md → to.md}
RENAMED
|
@@ -72,13 +72,13 @@ namespace std {
|
|
| 72 |
bool atomic_compare_exchange_weak(atomic-type*, T*, T) noexcept;
|
| 73 |
bool atomic_compare_exchange_strong(volatile atomic-type*, T*, T) noexcept;
|
| 74 |
bool atomic_compare_exchange_strong(atomic-type*, T*, T) noexcept;
|
| 75 |
bool atomic_compare_exchange_weak_explicit(volatile atomic-type*, T*, T,
|
| 76 |
memory_order, memory_order) noexcept;
|
| 77 |
-
bool atomic_compare_exchange_weak_explicit(atomic-type*, T*, T
|
| 78 |
memory_order, memory_order) noexcept;
|
| 79 |
-
bool atomic_compare_exchange_strong_explicit(volatile atomic-type*, T*, T
|
| 80 |
memory_order, memory_order) noexcept;
|
| 81 |
bool atomic_compare_exchange_strong_explicit(atomic-type*, T*, T,
|
| 82 |
memory_order, memory_order) noexcept;
|
| 83 |
|
| 84 |
// [atomics.types.operations.templ], templated operations on atomic types
|
|
@@ -233,16 +233,14 @@ orders for all affected locations, such that each `memory_order_seq_cst`
|
|
| 233 |
operation *B* that loads a value from an atomic object *M* observes one
|
| 234 |
of the following values:
|
| 235 |
|
| 236 |
- the result of the last modification *A* of *M* that precedes *B* in
|
| 237 |
*S*, if it exists, or
|
| 238 |
-
- if *A* exists, the result of some modification of *M* in the visible
|
| 239 |
-
sequence of side effects with respect to *B* that is not
|
| 240 |
`memory_order_seq_cst` and that does not happen before *A*, or
|
| 241 |
-
- if *A* does not exist, the result of some modification of *M* in the
|
| 242 |
-
visible sequence of side effects with respect to *B* that is not
|
| 243 |
-
`memory_order_seq_cst`.
|
| 244 |
|
| 245 |
Although it is not explicitly required that *S* include locks, it can
|
| 246 |
always be extended to an order that does include lock and unlock
|
| 247 |
operations, since the ordering between those is already included in the
|
| 248 |
“happens before” ordering.
|
|
@@ -264,63 +262,33 @@ modifies *M* and *B* takes its value, if there are
|
|
| 264 |
`memory_order_seq_cst` fences *X* and *Y* such that *A* is sequenced
|
| 265 |
before *X*, *Y* is sequenced before *B*, and *X* precedes *Y* in *S*,
|
| 266 |
then *B* observes either the effects of *A* or a later modification of
|
| 267 |
*M* in its modification order.
|
| 268 |
|
| 269 |
-
For atomic
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
|
| 274 |
`memory_order_seq_cst` ensures sequential consistency only for a program
|
| 275 |
that is free of data races and uses exclusively `memory_order_seq_cst`
|
| 276 |
operations. Any use of weaker ordering will invalidate this guarantee
|
| 277 |
unless extreme care is used. In particular, `memory_order_seq_cst`
|
| 278 |
fences ensure a total order only for the fences themselves. Fences
|
| 279 |
cannot, in general, be used to restore sequential consistency for atomic
|
| 280 |
operations with weaker ordering specifications.
|
| 281 |
|
| 282 |
-
An atomic store shall only store a value that has been computed from
|
| 283 |
-
constants and program input values by a finite sequence of program
|
| 284 |
-
evaluations, such that each evaluation observes the values of variables
|
| 285 |
-
as computed by the last prior assignment in the sequence. The ordering
|
| 286 |
-
of evaluations in this sequence shall be such that:
|
| 287 |
|
| 288 |
-
- if an evaluation *B* observes a value computed by *A* in a different
|
| 289 |
-
thread, then *B* does not happen before *A*, and
|
| 290 |
-
- if an evaluation *A* is included in the sequence, then every
|
| 291 |
-
evaluation that assigns to the same variable and happens before *A* is
|
| 292 |
-
included.
|
| 293 |
-
|
| 294 |
-
The second requirement disallows “out-of-thin-air” or “speculative”
|
| 295 |
-
stores of atomics when relaxed atomics are used. Since unordered
|
| 296 |
-
operations are involved, evaluations may appear in this sequence out of
|
| 297 |
-
thread order. For example, with `x` and `y` initially zero,
|
| 298 |
-
|
| 299 |
-
``` cpp
|
| 300 |
-
// Thread 1:
|
| 301 |
-
r1 = y.load(memory_order_relaxed);
|
| 302 |
-
x.store(r1, memory_order_relaxed);
|
| 303 |
-
```
|
| 304 |
-
|
| 305 |
-
``` cpp
|
| 306 |
-
// Thread 2:
|
| 307 |
-
r2 = x.load(memory_order_relaxed);
|
| 308 |
-
y.store(42, memory_order_relaxed);
|
| 309 |
-
```
|
| 310 |
-
|
| 311 |
-
is allowed to produce `r1 = r2 = 42`. The sequence of evaluations
|
| 312 |
-
justifying this consists of:
|
| 313 |
-
|
| 314 |
-
``` cpp
|
| 315 |
-
y.store(42, memory_order_relaxed);
|
| 316 |
-
r1 = y.load(memory_order_relaxed);
|
| 317 |
-
x.store(r1, memory_order_relaxed);
|
| 318 |
-
r2 = x.load(memory_order_relaxed);
|
| 319 |
-
```
|
| 320 |
-
|
| 321 |
-
On the other hand,
|
| 322 |
|
| 323 |
``` cpp
|
| 324 |
// Thread 1:
|
| 325 |
r1 = y.load(memory_order_relaxed);
|
| 326 |
x.store(r1, memory_order_relaxed);
|
|
@@ -330,32 +298,30 @@ x.store(r1, memory_order_relaxed);
|
|
| 330 |
// Thread 2:
|
| 331 |
r2 = x.load(memory_order_relaxed);
|
| 332 |
y.store(r2, memory_order_relaxed);
|
| 333 |
```
|
| 334 |
|
| 335 |
-
may not produce `r1 = r2 = 42`, since there is no sequence of
|
| 336 |
-
evaluations that results in the computation of 42. In the absence of
|
| 337 |
-
“relaxed” operations and read-modify-write operations with weaker than
|
| 338 |
-
`memory_order_acq_rel` ordering, the second requirement has no impact.
|
| 339 |
|
| 340 |
-
The
|
| 341 |
-
with `x` and `y` initially zero:
|
| 342 |
|
| 343 |
``` cpp
|
| 344 |
// Thread 1:
|
| 345 |
r1 = x.load(memory_order_relaxed);
|
| 346 |
-
if (r1 == 42) y.store(r1, memory_order_relaxed);
|
| 347 |
```
|
| 348 |
|
| 349 |
``` cpp
|
| 350 |
// Thread 2:
|
| 351 |
r2 = y.load(memory_order_relaxed);
|
| 352 |
if (r2 == 42) x.store(42, memory_order_relaxed);
|
| 353 |
```
|
| 354 |
|
| 355 |
-
However, implementations should not allow such behavior.
|
| 356 |
-
|
| 357 |
Atomic read-modify-write operations shall always read the last value (in
|
| 358 |
the modification order) written before the write associated with the
|
| 359 |
read-modify-write operation.
|
| 360 |
|
| 361 |
Implementations should make atomic stores visible to atomic loads within
|
|
@@ -555,11 +521,11 @@ defined in [[atomics.types.operations]].
|
|
| 555 |
|
| 556 |
Specializations and instantiations of the `atomic` template shall have a
|
| 557 |
deleted copy constructor, a deleted copy assignment operator, and a
|
| 558 |
constexpr value constructor.
|
| 559 |
|
| 560 |
-
There shall be full specializations of the `atomic` template for the
|
| 561 |
integral types `char`, `signed char`, `unsigned char`, `short`,
|
| 562 |
`unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
|
| 563 |
`long long`, `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`,
|
| 564 |
and any other types needed by the typedefs in the header `<cstdint>`.
|
| 565 |
For each integral type *integral*, the specialization `atomic<integral>`
|
|
@@ -579,11 +545,11 @@ default constructors, and trivial destructors. They shall each support
|
|
| 579 |
aggregate initialization syntax.
|
| 580 |
|
| 581 |
There shall be named types corresponding to the integral specializations
|
| 582 |
of `atomic`, as specified in Table [[tab:atomics.integral]], and a
|
| 583 |
named type `atomic_bool` corresponding to the specified `atomic<bool>`.
|
| 584 |
-
Each named type is a typedef to the corresponding specialization
|
| 585 |
or a base class of the corresponding specialization. If it is a base
|
| 586 |
class, it shall support the same member functions as the corresponding
|
| 587 |
specialization.
|
| 588 |
|
| 589 |
There shall be atomic typedefs corresponding to the typedefs in the
|
|
@@ -640,19 +606,17 @@ kind. The specific instances are defined in [[atomics.types.generic]],
|
|
| 640 |
[[atomics.types.operations.pointer]].
|
| 641 |
|
| 642 |
In the following operation definitions:
|
| 643 |
|
| 644 |
- an *A* refers to one of the atomic types.
|
| 645 |
-
- a *C* refers to its corresponding non-atomic type. The
|
| 646 |
-
`atomic_address` atomic type corresponds to the `void*` non-atomic
|
| 647 |
-
type.
|
| 648 |
- an *M* refers to the type of the other argument for arithmetic operations.
|
| 649 |
For integral atomic types, *M* is *C*. For atomic address types, *M*
|
| 650 |
is `std::ptrdiff_t`.
|
| 651 |
-
- the free functions not ending in `_explicit` have the semantics of
|
| 652 |
-
their corresponding `_explicit` with `memory_order`
|
| 653 |
-
arguments of `memory_order_seq_cst`.
|
| 654 |
|
| 655 |
Many operations are volatile-qualified. The “volatile as device
|
| 656 |
register” semantics have not changed in the standard. This qualification
|
| 657 |
means that volatility is preserved when applying these operations to
|
| 658 |
volatile objects. It does not mean that operations on non-volatile
|
|
@@ -1000,18 +964,22 @@ The `atomic_flag` type shall have standard layout. It shall have a
|
|
| 1000 |
trivial default constructor, a deleted copy constructor, a deleted copy
|
| 1001 |
assignment operator, and a trivial destructor.
|
| 1002 |
|
| 1003 |
The macro `ATOMIC_FLAG_INIT` shall be defined in such a way that it can
|
| 1004 |
be used to initialize an object of type `atomic_flag` to the clear
|
| 1005 |
-
state. For a static-duration object, that initialization shall be
|
| 1006 |
-
static. It is unspecified whether an uninitialized `atomic_flag` object
|
| 1007 |
-
has an initial state of set or clear.
|
| 1008 |
|
| 1009 |
``` cpp
|
| 1010 |
atomic_flag guard = ATOMIC_FLAG_INIT;
|
| 1011 |
```
|
| 1012 |
|
| 1013 |
``` cpp
|
| 1014 |
bool atomic_flag_test_and_set(volatile atomic_flag* object) noexcept;
|
| 1015 |
bool atomic_flag_test_and_set(atomic_flag* object) noexcept;
|
| 1016 |
bool atomic_flag_test_and_set_explicit(volatile atomic_flag* object, memory_order order) noexcept;
|
| 1017 |
bool atomic_flag_test_and_set_explicit(atomic_flag* object, memory_order order) noexcept;
|
|
@@ -1034,12 +1002,12 @@ void atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order
|
|
| 1034 |
void atomic_flag_clear_explicit(atomic_flag* object, memory_order order) noexcept;
|
| 1035 |
void atomic_flag::clear(memory_order order = memory_order_seq_cst) volatile noexcept;
|
| 1036 |
void atomic_flag::clear(memory_order order = memory_order_seq_cst) noexcept;
|
| 1037 |
```
|
| 1038 |
|
| 1039 |
-
*Requires:* The `order` argument shall not be `memory_order_acquire` nor
|
| 1040 |
-
`memory_order_acq_rel`.
|
| 1041 |
|
| 1042 |
*Effects:* Atomically sets the value pointed to by `object` or by `this`
|
| 1043 |
to false. Memory is affected according to the value of `order`.
|
| 1044 |
|
| 1045 |
## Fences <a id="atomics.fences">[[atomics.fences]]</a>
|
|
@@ -1086,11 +1054,11 @@ extern "C" void atomic_thread_fence(memory_order order) noexcept;
|
|
| 1086 |
|
| 1087 |
``` cpp
|
| 1088 |
extern "C" void atomic_signal_fence(memory_order order) noexcept;
|
| 1089 |
```
|
| 1090 |
|
| 1091 |
-
*Effects:* equivalent to `atomic_thread_fence(order)`, except that the
|
| 1092 |
resulting ordering constraints are established only between a thread and
|
| 1093 |
a signal handler executed in the same thread.
|
| 1094 |
|
| 1095 |
*Note:* `atomic_signal_fence` can be used to specify the order in which
|
| 1096 |
actions performed by the thread become visible to the signal handler.
|
|
| 72 |
bool atomic_compare_exchange_weak(atomic-type*, T*, T) noexcept;
|
| 73 |
bool atomic_compare_exchange_strong(volatile atomic-type*, T*, T) noexcept;
|
| 74 |
bool atomic_compare_exchange_strong(atomic-type*, T*, T) noexcept;
|
| 75 |
bool atomic_compare_exchange_weak_explicit(volatile atomic-type*, T*, T,
|
| 76 |
memory_order, memory_order) noexcept;
|
| 77 |
+
bool atomic_compare_exchange_weak_explicit(atomic-type*, T*, T,
|
| 78 |
memory_order, memory_order) noexcept;
|
| 79 |
+
bool atomic_compare_exchange_strong_explicit(volatile atomic-type*, T*, T,
|
| 80 |
memory_order, memory_order) noexcept;
|
| 81 |
bool atomic_compare_exchange_strong_explicit(atomic-type*, T*, T,
|
| 82 |
memory_order, memory_order) noexcept;
|
| 83 |
|
| 84 |
// [atomics.types.operations.templ], templated operations on atomic types
|
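
As an editorial illustration of the synopsis above (not part of the wording): the weak form of compare-and-exchange may fail spuriously, so it is normally called in a retry loop. A minimal sketch, using a hypothetical `fetch_max` helper and the member form of the operation:

``` cpp
#include <atomic>

// Hypothetical helper: atomically raises m to at least v. On failure,
// compare_exchange_weak reloads `expected` with the current value, so
// each retry works with fresh data; spurious failures simply retry.
void fetch_max(std::atomic<int>& m, int v) {
  int expected = m.load();
  while (expected < v && !m.compare_exchange_weak(expected, v)) {
    // retry
  }
}
```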
|
|
|
| 233 |
operation *B* that loads a value from an atomic object *M* observes one
|
| 234 |
of the following values:
|
| 235 |
|
| 236 |
- the result of the last modification *A* of *M* that precedes *B* in
|
| 237 |
*S*, if it exists, or
|
| 238 |
+
- if *A* exists, the result of some modification of *M* that is not
|
| 239 |
`memory_order_seq_cst` and that does not happen before *A*, or
|
| 240 |
+
- if *A* does not exist, the result of some modification of *M* that is
|
| 241 |
+
not `memory_order_seq_cst`.
|
| 242 |
|
| 243 |
Although it is not explicitly required that *S* include locks, it can
|
| 244 |
always be extended to an order that does include lock and unlock
|
| 245 |
operations, since the ordering between those is already included in the
|
| 246 |
“happens before” ordering.
|
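
An editorial illustration of the total order *S* (not part of the wording): in the classic store-buffering test, with `x` and `y` atomic and initially zero, the outcome `r1 == 0 && r2 == 0` is impossible, because whichever store comes later in *S* must already be visible to the other thread's `memory_order_seq_cst` load.

``` cpp
// Thread 1:
x.store(1, memory_order_seq_cst);
r1 = y.load(memory_order_seq_cst);
```

``` cpp
// Thread 2:
y.store(1, memory_order_seq_cst);
r2 = x.load(memory_order_seq_cst);
```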
|
|
|
| 262 |
`memory_order_seq_cst` fences *X* and *Y* such that *A* is sequenced
|
| 263 |
before *X*, *Y* is sequenced before *B*, and *X* precedes *Y* in *S*,
|
| 264 |
then *B* observes either the effects of *A* or a later modification of
|
| 265 |
*M* in its modification order.
|
| 266 |
|
| 267 |
+
For atomic modifications *A* and *B* of an atomic object *M*, *B* occurs
|
| 268 |
+
later than *A* in the modification order of *M* if:
|
| 269 |
+
|
| 270 |
+
- there is a `memory_order_seq_cst` fence *X* such that *A* is sequenced
|
| 271 |
+
before *X*, and *X* precedes *B* in *S*, or
|
| 272 |
+
- there is a `memory_order_seq_cst` fence *Y* such that *Y* is sequenced
|
| 273 |
+
before *B*, and *A* precedes *Y* in *S*, or
|
| 274 |
+
- there are `memory_order_seq_cst` fences *X* and *Y* such that *A* is
|
| 275 |
+
sequenced before *X*, *Y* is sequenced before *B*, and *X* precedes
|
| 276 |
+
*Y* in *S*.
|
| 277 |
|
| 278 |
`memory_order_seq_cst` ensures sequential consistency only for a program
|
| 279 |
that is free of data races and uses exclusively `memory_order_seq_cst`
|
| 280 |
operations. Any use of weaker ordering will invalidate this guarantee
|
| 281 |
unless extreme care is used. In particular, `memory_order_seq_cst`
|
| 282 |
fences ensure a total order only for the fences themselves. Fences
|
| 283 |
cannot, in general, be used to restore sequential consistency for atomic
|
| 284 |
operations with weaker ordering specifications.
|
| 285 |
|
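
An editorial illustration of the fence rules above, with `x` and `y` atomic and initially zero: one of the two fences must precede the other in *S*, so at least one relaxed load observes the other thread's store, and `r1 == 0 && r2 == 0` cannot occur.

``` cpp
// Thread 1:
x.store(1, memory_order_relaxed);
atomic_thread_fence(memory_order_seq_cst);
r1 = y.load(memory_order_relaxed);
```

``` cpp
// Thread 2:
y.store(1, memory_order_relaxed);
atomic_thread_fence(memory_order_seq_cst);
r2 = x.load(memory_order_relaxed);
```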
| 286 |
+
Implementations should ensure that no “out-of-thin-air” values are
|
| 287 |
+
computed that circularly depend on their own computation.
|
| 288 |
|
| 289 |
+
For example, with `x` and `y` initially zero,
|
| 290 |
|
| 291 |
``` cpp
|
| 292 |
// Thread 1:
|
| 293 |
r1 = y.load(memory_order_relaxed);
|
| 294 |
x.store(r1, memory_order_relaxed);
|
|
|
|
| 298 |
// Thread 2:
|
| 299 |
r2 = x.load(memory_order_relaxed);
|
| 300 |
y.store(r2, memory_order_relaxed);
|
| 301 |
```
|
| 302 |
|
| 303 |
+
should not produce `r1 == r2 == 42`, since the store of 42 to `y` is
|
| 304 |
+
only possible if the store to `x` stores `42`, which circularly depends
|
| 305 |
+
on the store to `y` storing `42`. Note that without this restriction,
|
| 306 |
+
such an execution is possible.
|
| 307 |
|
| 308 |
+
The recommendation similarly disallows `r1 == r2 == 42` in the following
|
| 309 |
+
example, with `x` and `y` again initially zero:
|
| 310 |
|
| 311 |
``` cpp
|
| 312 |
// Thread 1:
|
| 313 |
r1 = x.load(memory_order_relaxed);
|
| 314 |
+
if (r1 == 42) y.store(42, memory_order_relaxed);
|
| 315 |
```
|
| 316 |
|
| 317 |
``` cpp
|
| 318 |
// Thread 2:
|
| 319 |
r2 = y.load(memory_order_relaxed);
|
| 320 |
if (r2 == 42) x.store(42, memory_order_relaxed);
|
| 321 |
```
|
| 322 |
|
| 323 |
Atomic read-modify-write operations shall always read the last value (in
|
| 324 |
the modification order) written before the write associated with the
|
| 325 |
read-modify-write operation.
|
| 326 |
|
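
An editorial illustration of this rule: because each read-modify-write reads the latest value in the modification order, concurrent increments cannot be lost, even with relaxed ordering.

``` cpp
#include <atomic>
#include <thread>

int main() {
  std::atomic<int> counter(0);
  auto work = [&counter] {
    for (int i = 0; i < 100000; ++i)
      counter.fetch_add(1, std::memory_order_relaxed);  // reads last value
  };
  std::thread t1(work), t2(work);
  t1.join();
  t2.join();
  return counter.load() == 200000 ? 0 : 1;  // always 200000
}
```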
| 327 |
Implementations should make atomic stores visible to atomic loads within
|
|
|
|
| 521 |
|
| 522 |
Specializations and instantiations of the `atomic` template shall have a
|
| 523 |
deleted copy constructor, a deleted copy assignment operator, and a
|
| 524 |
constexpr value constructor.
|
| 525 |
|
| 526 |
+
There shall be explicit specializations of the `atomic` template for the
|
| 527 |
integral types `char`, `signed char`, `unsigned char`, `short`,
|
| 528 |
`unsigned short`, `int`, `unsigned int`, `long`, `unsigned long`,
|
| 529 |
`long long`, `unsigned long long`, `char16_t`, `char32_t`, `wchar_t`,
|
| 530 |
and any other types needed by the typedefs in the header `<cstdint>`.
|
| 531 |
For each integral type *integral*, the specialization `atomic<integral>`
|
|
|
|
| 545 |
aggregate initialization syntax.
|
| 546 |
|
| 547 |
There shall be named types corresponding to the integral specializations
|
| 548 |
of `atomic`, as specified in Table [[tab:atomics.integral]], and a
|
| 549 |
named type `atomic_bool` corresponding to the specified `atomic<bool>`.
|
| 550 |
+
Each named type is either a typedef to the corresponding specialization
|
| 551 |
or a base class of the corresponding specialization. If it is a base
|
| 552 |
class, it shall support the same member functions as the corresponding
|
| 553 |
specialization.
|
| 554 |
|
| 555 |
There shall be atomic typedefs corresponding to the typedefs in the
|
|
|
|
| 606 |
[[atomics.types.operations.pointer]].
|
| 607 |
|
| 608 |
In the following operation definitions:
|
| 609 |
|
| 610 |
- an *A* refers to one of the atomic types.
|
| 611 |
+
- a *C* refers to its corresponding non-atomic type.
|
| 612 |
- an *M* refers to the type of the other argument for arithmetic operations.
|
| 613 |
For integral atomic types, *M* is *C*. For atomic address types, *M*
|
| 614 |
is `std::ptrdiff_t`.
|
| 615 |
+
- the non-member functions not ending in `_explicit` have the semantics
|
| 616 |
+
of their corresponding `_explicit` functions with `memory_order`
|
| 617 |
+
arguments of `memory_order_seq_cst`.
|
| 618 |
|
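
An editorial sketch of the equivalence stated in the last bullet:

``` cpp
#include <atomic>

void equivalent_calls(std::atomic<int>* p) {
  // These two calls have identical semantics: the plain non-member
  // function behaves as its _explicit form with memory_order_seq_cst.
  std::atomic_fetch_add(p, 1);
  std::atomic_fetch_add_explicit(p, 1, std::memory_order_seq_cst);
}
```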
| 619 |
Many operations are volatile-qualified. The “volatile as device
|
| 620 |
register” semantics have not changed in the standard. This qualification
|
| 621 |
means that volatility is preserved when applying these operations to
|
| 622 |
volatile objects. It does not mean that operations on non-volatile
|
|
|
|
| 964 |
trivial default constructor, a deleted copy constructor, a deleted copy
|
| 965 |
assignment operator, and a trivial destructor.
|
| 966 |
|
| 967 |
The macro `ATOMIC_FLAG_INIT` shall be defined in such a way that it can
|
| 968 |
be used to initialize an object of type `atomic_flag` to the clear
|
| 969 |
+
state. The macro can be used in the form:
|
| 970 |
|
| 971 |
``` cpp
|
| 972 |
atomic_flag guard = ATOMIC_FLAG_INIT;
|
| 973 |
```
|
| 974 |
|
| 975 |
+
It is unspecified whether the macro can be used in other initialization
|
| 976 |
+
contexts. For a complete static-duration object, that initialization
|
| 977 |
+
shall be static. Unless initialized with `ATOMIC_FLAG_INIT`, it is
|
| 978 |
+
unspecified whether an `atomic_flag` object has an initial state of set
|
| 979 |
+
or clear.
|
| 980 |
+
|
| 981 |
``` cpp
|
| 982 |
bool atomic_flag_test_and_set(volatile atomic_flag* object) noexcept;
|
| 983 |
bool atomic_flag_test_and_set(atomic_flag* object) noexcept;
|
| 984 |
bool atomic_flag_test_and_set_explicit(volatile atomic_flag* object, memory_order order) noexcept;
|
| 985 |
bool atomic_flag_test_and_set_explicit(atomic_flag* object, memory_order order) noexcept;
|
|
|
|
| 1002 |
void atomic_flag_clear_explicit(atomic_flag* object, memory_order order) noexcept;
|
| 1003 |
void atomic_flag::clear(memory_order order = memory_order_seq_cst) volatile noexcept;
|
| 1004 |
void atomic_flag::clear(memory_order order = memory_order_seq_cst) noexcept;
|
| 1005 |
```
|
| 1006 |
|
| 1007 |
+
*Requires:* The `order` argument shall not be `memory_order_consume`,
|
| 1008 |
+
`memory_order_acquire`, nor `memory_order_acq_rel`.
|
| 1009 |
|
| 1010 |
*Effects:* Atomically sets the value pointed to by `object` or by `this`
|
| 1011 |
to false. Memory is affected according to the value of `order`.
|
| 1012 |
|
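
An editorial sketch tying these operations together: `atomic_flag` with `ATOMIC_FLAG_INIT` suffices for a minimal spinlock, with `clear` using `memory_order_release`, one of the permitted orders.

``` cpp
#include <atomic>

std::atomic_flag guard = ATOMIC_FLAG_INIT;  // static initialization to clear

void critical_section() {
  // test_and_set returns the previous state; spin until it was clear.
  while (guard.test_and_set(std::memory_order_acquire)) {
    // spin
  }
  // ... exclusive access ...
  guard.clear(std::memory_order_release);
}
```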
| 1013 |
## Fences <a id="atomics.fences">[[atomics.fences]]</a>
|
|
|
|
| 1054 |
|
| 1055 |
``` cpp
|
| 1056 |
extern "C" void atomic_signal_fence(memory_order order) noexcept;
|
| 1057 |
```
|
| 1058 |
|
| 1059 |
+
*Effects:* Equivalent to `atomic_thread_fence(order)`, except that the
|
| 1060 |
resulting ordering constraints are established only between a thread and
|
| 1061 |
a signal handler executed in the same thread.
|
| 1062 |
|
| 1063 |
*Note:* `atomic_signal_fence` can be used to specify the order in which
|
| 1064 |
actions performed by the thread become visible to the signal handler.
|
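
An editorial sketch of this note (names are illustrative): the fences order a data write before the flag write as seen by a handler running in the same thread, constraining only compiler reordering.

``` cpp
#include <atomic>
#include <csignal>

int data = 0;                          // ordinary data written by the thread
volatile std::sig_atomic_t ready = 0;  // flag examined by the handler

extern "C" void on_signal(int) {
  if (ready) {
    std::atomic_signal_fence(std::memory_order_acquire);
    // data == 42 is visible here: the paired fences keep the
    // compiler from reordering the accesses around them.
  }
}

void publish() {
  data = 42;
  std::atomic_signal_fence(std::memory_order_release);
  ready = 1;  // set last, after the release fence
}
```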