tmp/tmpcelgs5hu/{from.md → to.md}
RENAMED
|
@@ -1,21 +1,28 @@
|
|
| 1 |
-
### Partial specializations for smart pointers <a id="util.smartptr.atomic">[[util.smartptr.atomic]]</a>
|
|
|
|
|
|
|
| 2 |
|
| 3 |
The library provides partial specializations of the `atomic` template
|
| 4 |
-
for shared-ownership smart pointers [[util.sharedptr]]. The behavior of all
|
| 5 |
-
operations is as specified in [[atomics.types.generic]], unless
|
| 6 |
-
specified otherwise. The template parameter `T` of these partial
|
| 7 |
-
specializations may be an incomplete type.
|
| 8 |
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
atomically. Associated `use_count` decrements are sequenced after the
|
| 12 |
atomic operation, but are not required to be part of it. Any associated
|
| 13 |
deletion and deallocation are sequenced after the atomic update step and
|
| 14 |
are not part of the atomic operation.
|
| 15 |
|
| 16 |
-
[*Note 1*: If the atomic operation uses locks, locks acquired by the
|
| 17 |
implementation will be held when any `use_count` adjustments are
|
| 18 |
performed, and will not be held when any destruction or deallocation
|
| 19 |
resulting from this is performed. — *end note*]
|
| 20 |
|
| 21 |
[*Example 1*:
|
|
@@ -27,16 +34,16 @@ template<typename T> class atomic_list {
|
|
| 27 |
shared_ptr<node> next;
|
| 28 |
};
|
| 29 |
atomic<shared_ptr<node>> head;
|
| 30 |
|
| 31 |
public:
|
| 32 |
-
|
| 33 |
auto p = head.load();
|
| 34 |
while (p && p->t != t)
|
| 35 |
p = p->next;
|
| 36 |
|
| 37 |
-
return
|
| 38 |
}
|
| 39 |
|
| 40 |
void push_front(T t) {
|
| 41 |
auto p = make_shared<node>();
|
| 42 |
p->t = t;
|
|
@@ -46,21 +53,22 @@ public:
|
|
| 46 |
};
|
| 47 |
```
|
| 48 |
|
| 49 |
— *end example*]
|
| 50 |
|
| 51 |
-
#### Partial specialization for `shared_ptr` <a id="util.smartptr.atomic.shared">[[util.smartptr.atomic.shared]]</a>
|
| 52 |
|
| 53 |
``` cpp
|
| 54 |
namespace std {
|
| 55 |
template<class T> struct atomic<shared_ptr<T>> {
|
| 56 |
using value_type = shared_ptr<T>;
|
| 57 |
|
| 58 |
static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
|
| 59 |
bool is_lock_free() const noexcept;
|
| 60 |
|
| 61 |
constexpr atomic() noexcept;
|
|
|
|
| 62 |
atomic(shared_ptr<T> desired) noexcept;
|
| 63 |
atomic(const atomic&) = delete;
|
| 64 |
void operator=(const atomic&) = delete;
|
| 65 |
|
| 66 |
shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
|
|
@@ -255,11 +263,11 @@ void notify_all() noexcept;
|
|
| 255 |
are eligible to be unblocked [[atomics.wait]] by this call.
|
| 256 |
|
| 257 |
*Remarks:* This function is an atomic notifying
|
| 258 |
operation [[atomics.wait]].
|
| 259 |
|
| 260 |
-
#### Partial specialization for `weak_ptr` <a id="util.smartptr.atomic.weak">[[util.smartptr.atomic.weak]]</a>
|
| 261 |
|
| 262 |
``` cpp
|
| 263 |
namespace std {
|
| 264 |
template<class T> struct atomic<weak_ptr<T>> {
|
| 265 |
using value_type = weak_ptr<T>;
|
|
@@ -309,11 +317,11 @@ atomic(weak_ptr<T> desired) noexcept;
|
|
| 309 |
```
|
| 310 |
|
| 311 |
*Effects:* Initializes the object with the value `desired`.
|
| 312 |
Initialization is not an atomic operation [[intro.multithread]].
|
| 313 |
|
| 314 |
-
[*Note
|
| 315 |
race with its construction, for example, by communicating the address of
|
| 316 |
the just-constructed object `A` to another thread via
|
| 317 |
`memory_order::relaxed` operations on a suitable atomic pointer
|
| 318 |
variable, and then immediately accessing `A` in the receiving thread.
|
| 319 |
This results in undefined behavior. — *end note*]
|
|
|
|
| 1 |
+
#### Partial specializations for smart pointers <a id="util.smartptr.atomic">[[util.smartptr.atomic]]</a>
|
| 2 |
+
|
| 3 |
+
##### General <a id="util.smartptr.atomic.general">[[util.smartptr.atomic.general]]</a>
|
| 4 |
|
| 5 |
The library provides partial specializations of the `atomic` template
|
| 6 |
+
for shared-ownership smart pointers [[util.sharedptr]].
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
+
[*Note 1*: The partial specializations are declared in header
|
| 9 |
+
`<memory>`. — *end note*]
|
| 10 |
+
|
| 11 |
+
The behavior of all operations is as specified in
|
| 12 |
+
[[atomics.types.generic]], unless specified otherwise. The template
|
| 13 |
+
parameter `T` of these partial specializations may be an incomplete
|
| 14 |
+
type.
|
| 15 |
+
|
| 16 |
+
All changes to an atomic smart pointer in [[util.smartptr.atomic]], and
|
| 17 |
+
all associated `use_count` increments, are guaranteed to be performed
|
| 18 |
atomically. Associated `use_count` decrements are sequenced after the
|
| 19 |
atomic operation, but are not required to be part of it. Any associated
|
| 20 |
deletion and deallocation are sequenced after the atomic update step and
|
| 21 |
are not part of the atomic operation.
|
| 22 |
|
| 23 |
+
[*Note 2*: If the atomic operation uses locks, locks acquired by the
|
| 24 |
implementation will be held when any `use_count` adjustments are
|
| 25 |
performed, and will not be held when any destruction or deallocation
|
| 26 |
resulting from this is performed. — *end note*]
|
| 27 |
|
| 28 |
[*Example 1*:
|
|
|
|
| 34 |
shared_ptr<node> next;
|
| 35 |
};
|
| 36 |
atomic<shared_ptr<node>> head;
|
| 37 |
|
| 38 |
public:
|
| 39 |
+
shared_ptr<node> find(T t) const {
|
| 40 |
auto p = head.load();
|
| 41 |
while (p && p->t != t)
|
| 42 |
p = p->next;
|
| 43 |
|
| 44 |
+
return p;
|
| 45 |
}
|
| 46 |
|
| 47 |
void push_front(T t) {
|
| 48 |
auto p = make_shared<node>();
|
| 49 |
p->t = t;
|
|
|
|
| 53 |
};
|
| 54 |
```
|
| 55 |
|
| 56 |
— *end example*]
|
| 57 |
|
| 58 |
+
##### Partial specialization for `shared_ptr` <a id="util.smartptr.atomic.shared">[[util.smartptr.atomic.shared]]</a>
|
| 59 |
|
| 60 |
``` cpp
|
| 61 |
namespace std {
|
| 62 |
template<class T> struct atomic<shared_ptr<T>> {
|
| 63 |
using value_type = shared_ptr<T>;
|
| 64 |
|
| 65 |
static constexpr bool is_always_lock_free = implementation-defined; // whether a given atomic type's operations are always lock free
|
| 66 |
bool is_lock_free() const noexcept;
|
| 67 |
|
| 68 |
constexpr atomic() noexcept;
|
| 69 |
+
constexpr atomic(nullptr_t) noexcept : atomic() { }
|
| 70 |
atomic(shared_ptr<T> desired) noexcept;
|
| 71 |
atomic(const atomic&) = delete;
|
| 72 |
void operator=(const atomic&) = delete;
|
| 73 |
|
| 74 |
shared_ptr<T> load(memory_order order = memory_order::seq_cst) const noexcept;
|
|
|
|
| 263 |
are eligible to be unblocked [[atomics.wait]] by this call.
|
| 264 |
|
| 265 |
*Remarks:* This function is an atomic notifying
|
| 266 |
operation [[atomics.wait]].
|
| 267 |
|
| 268 |
+
##### Partial specialization for `weak_ptr` <a id="util.smartptr.atomic.weak">[[util.smartptr.atomic.weak]]</a>
|
| 269 |
|
| 270 |
``` cpp
|
| 271 |
namespace std {
|
| 272 |
template<class T> struct atomic<weak_ptr<T>> {
|
| 273 |
using value_type = weak_ptr<T>;
|
|
|
|
| 317 |
```
|
| 318 |
|
| 319 |
*Effects:* Initializes the object with the value `desired`.
|
| 320 |
Initialization is not an atomic operation [[intro.multithread]].
|
| 321 |
|
| 322 |
+
[*Note 2*: It is possible to have an access to an atomic object `A`
|
| 323 |
race with its construction, for example, by communicating the address of
|
| 324 |
the just-constructed object `A` to another thread via
|
| 325 |
`memory_order::relaxed` operations on a suitable atomic pointer
|
| 326 |
variable, and then immediately accessing `A` in the receiving thread.
|
| 327 |
This results in undefined behavior. — *end note*]
|