libstdc++
atomic_base.h
1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2024 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#pragma GCC system_header
34
35#include <bits/c++config.h>
36#include <new> // For placement new
38#include <bits/move.h>
39
40#if __cplusplus > 201703L && _GLIBCXX_HOSTED
41#include <bits/atomic_wait.h>
42#endif
43
44#ifndef _GLIBCXX_ALWAYS_INLINE
45#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
46#endif
47
48#include <bits/version.h>
49
50namespace std _GLIBCXX_VISIBILITY(default)
51{
52_GLIBCXX_BEGIN_NAMESPACE_VERSION
53
54 /**
55 * @defgroup atomics Atomics
56 *
57 * Components for performing atomic operations.
58 * @{
59 */
60
61 /// Enumeration for memory_order
62#if __cplusplus > 201703L
63 enum class memory_order : int
64 {
65 relaxed,
66 consume,
67 acquire,
68 release,
69 acq_rel,
70 seq_cst
71 };
72
73 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
74 inline constexpr memory_order memory_order_consume = memory_order::consume;
75 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
76 inline constexpr memory_order memory_order_release = memory_order::release;
77 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
78 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
79#else
80 enum memory_order : int
81 {
82 memory_order_relaxed,
83 memory_order_consume,
84 memory_order_acquire,
85 memory_order_release,
86 memory_order_acq_rel,
87 memory_order_seq_cst
88 };
89#endif
90
91 /// @cond undocumented
92 enum __memory_order_modifier
93 {
94 __memory_order_mask = 0x0ffff,
95 __memory_order_modifier_mask = 0xffff0000,
96 __memory_order_hle_acquire = 0x10000,
97 __memory_order_hle_release = 0x20000
98 };
99 /// @endcond
100
101 constexpr memory_order
102 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
103 {
104 return memory_order(int(__m) | int(__mod));
105 }
106
107 constexpr memory_order
108 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
109 {
110 return memory_order(int(__m) & int(__mod));
111 }
112
113 /// @cond undocumented
114
115 // Drop release ordering as per [atomics.types.operations.req]/21
116 constexpr memory_order
117 __cmpexch_failure_order2(memory_order __m) noexcept
118 {
119 return __m == memory_order_acq_rel ? memory_order_acquire
120 : __m == memory_order_release ? memory_order_relaxed : __m;
121 }
122
123 constexpr memory_order
124 __cmpexch_failure_order(memory_order __m) noexcept
125 {
126 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
127 | __memory_order_modifier(__m & __memory_order_modifier_mask));
128 }
129
130 constexpr bool
131 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
132 {
133 return (__m & __memory_order_mask) != memory_order_release
134 && (__m & __memory_order_mask) != memory_order_acq_rel;
135 }
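  // [Editorial note, not part of the upstream header] How the helpers above
  // combine: when a compare_exchange overload is given only a success order,
  // the failure order is derived by dropping any release component, e.g.
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
  //
  // and __is_valid_cmpexch_failure_order rejects release/acq_rel, which a
  // failed (load-only) comparison could not honour.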
136
137 // Base types for atomics.
138 template<typename _IntTp>
139 struct __atomic_base;
140
141 /// @endcond
142
143 _GLIBCXX_ALWAYS_INLINE void
144 atomic_thread_fence(memory_order __m) noexcept
145 { __atomic_thread_fence(int(__m)); }
146
147 _GLIBCXX_ALWAYS_INLINE void
148 atomic_signal_fence(memory_order __m) noexcept
149 { __atomic_signal_fence(int(__m)); }
150
151 /// kill_dependency
152 template<typename _Tp>
153 inline _Tp
154 kill_dependency(_Tp __y) noexcept
155 {
156 _Tp __ret(__y);
157 return __ret;
158 }
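  // [Editorial note, not part of the upstream header] kill_dependency simply
  // returns its argument by value; its purpose is to end a
  // memory_order_consume dependency chain, e.g. (hypothetical usage):
  //
  //   int* p = ptr.load(std::memory_order_consume);
  //   int r = std::kill_dependency(p[0]); // r no longer carries a dependency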
159
160/// @cond undocumented
161#if __glibcxx_atomic_value_initialization
162# define _GLIBCXX20_INIT(I) = I
163#else
164# define _GLIBCXX20_INIT(I)
165#endif
166/// @endcond
167
168#define ATOMIC_VAR_INIT(_VI) { _VI }
169
170 template<typename _Tp>
171 struct atomic;
172
173 template<typename _Tp>
174 struct atomic<_Tp*>;
175
176 /* The target's "set" value for test-and-set may not be exactly 1. */
177#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
178 typedef bool __atomic_flag_data_type;
179#else
180 typedef unsigned char __atomic_flag_data_type;
181#endif
182
183 /// @cond undocumented
184
185 /*
186 * Base type for atomic_flag.
187 *
188 * Base type is POD with data, allowing atomic_flag to derive from
189 * it and meet the standard layout type requirement. In addition to
190 * compatibility with a C interface, this allows different
191 * implementations of atomic_flag to use the same atomic operation
192 * functions, via a standard conversion to the __atomic_flag_base
193 * argument.
194 */
195 _GLIBCXX_BEGIN_EXTERN_C
196
197 struct __atomic_flag_base
198 {
199 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
200 };
201
202 _GLIBCXX_END_EXTERN_C
203
204 /// @endcond
205
206#define ATOMIC_FLAG_INIT { 0 }
207
208 /// atomic_flag
209 struct atomic_flag : public __atomic_flag_base
210 {
211 atomic_flag() noexcept = default;
212 ~atomic_flag() noexcept = default;
213 atomic_flag(const atomic_flag&) = delete;
214 atomic_flag& operator=(const atomic_flag&) = delete;
215 atomic_flag& operator=(const atomic_flag&) volatile = delete;
216
217 // Conversion to ATOMIC_FLAG_INIT.
218 constexpr atomic_flag(bool __i) noexcept
219 : __atomic_flag_base{ _S_init(__i) }
220 { }
221
222 _GLIBCXX_ALWAYS_INLINE bool
223 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
224 {
225 return __atomic_test_and_set (&_M_i, int(__m));
226 }
227
228 _GLIBCXX_ALWAYS_INLINE bool
229 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
230 {
231 return __atomic_test_and_set (&_M_i, int(__m));
232 }
233
234#ifdef __glibcxx_atomic_flag_test // C++ >= 20
235 _GLIBCXX_ALWAYS_INLINE bool
236 test(memory_order __m = memory_order_seq_cst) const noexcept
237 {
238 __atomic_flag_data_type __v;
239 __atomic_load(&_M_i, &__v, int(__m));
240 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
241 }
242
243 _GLIBCXX_ALWAYS_INLINE bool
244 test(memory_order __m = memory_order_seq_cst) const volatile noexcept
245 {
246 __atomic_flag_data_type __v;
247 __atomic_load(&_M_i, &__v, int(__m));
248 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
249 }
250#endif
251
252#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
253 _GLIBCXX_ALWAYS_INLINE void
254 wait(bool __old,
255 memory_order __m = memory_order_seq_cst) const noexcept
256 {
257 const __atomic_flag_data_type __v
258 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
259
260 std::__atomic_wait_address_v(&_M_i, __v,
261 [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
262 }
263
264 // TODO add const volatile overload
265
266 _GLIBCXX_ALWAYS_INLINE void
267 notify_one() noexcept
268 { std::__atomic_notify_address(&_M_i, false); }
269
270 // TODO add const volatile overload
271
272 _GLIBCXX_ALWAYS_INLINE void
273 notify_all() noexcept
274 { std::__atomic_notify_address(&_M_i, true); }
275
276 // TODO add const volatile overload
277#endif // __glibcxx_atomic_wait
278
279 _GLIBCXX_ALWAYS_INLINE void
280 clear(memory_order __m = memory_order_seq_cst) noexcept
281 {
282 memory_order __b __attribute__ ((__unused__))
283 = __m & __memory_order_mask;
284 __glibcxx_assert(__b != memory_order_consume);
285 __glibcxx_assert(__b != memory_order_acquire);
286 __glibcxx_assert(__b != memory_order_acq_rel);
287
288 __atomic_clear (&_M_i, int(__m));
289 }
290
291 _GLIBCXX_ALWAYS_INLINE void
292 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
293 {
294 memory_order __b __attribute__ ((__unused__))
295 = __m & __memory_order_mask;
296 __glibcxx_assert(__b != memory_order_consume);
297 __glibcxx_assert(__b != memory_order_acquire);
298 __glibcxx_assert(__b != memory_order_acq_rel);
299
300 __atomic_clear (&_M_i, int(__m));
301 }
302
303 private:
304 static constexpr __atomic_flag_data_type
305 _S_init(bool __i)
306 { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
307 };
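  // [Editorial example, not part of the upstream header] A minimal sketch of
  // the classic spin lock built on the operations above, assuming <atomic>
  // has been included so std::atomic_flag and ATOMIC_FLAG_INIT are visible:
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire)) // spin until clear
  //       { /* busy-wait; with C++20 one could also call lock.wait(true) */ }
  //     // ... exclusive work ...
  //     lock.clear(std::memory_order_release);               // unlock
  //   }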
308
309 /// @cond undocumented
310
311 /// Base class for atomic integrals.
312 //
313 // For each of the integral types, define atomic_[integral type] struct
314 //
315 // atomic_bool bool
316 // atomic_char char
317 // atomic_schar signed char
318 // atomic_uchar unsigned char
319 // atomic_short short
320 // atomic_ushort unsigned short
321 // atomic_int int
322 // atomic_uint unsigned int
323 // atomic_long long
324 // atomic_ulong unsigned long
325 // atomic_llong long long
326 // atomic_ullong unsigned long long
327 // atomic_char8_t char8_t
328 // atomic_char16_t char16_t
329 // atomic_char32_t char32_t
330 // atomic_wchar_t wchar_t
331 //
332 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
333 // 8 bytes, since that is what GCC built-in functions for atomic
334 // memory access expect.
335 template<typename _ITp>
336 struct __atomic_base
337 {
338 using value_type = _ITp;
339 using difference_type = value_type;
340
341 private:
342 typedef _ITp __int_type;
343
344 static constexpr int _S_alignment =
345 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
346
347 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
348
349 public:
350 __atomic_base() noexcept = default;
351 ~__atomic_base() noexcept = default;
352 __atomic_base(const __atomic_base&) = delete;
353 __atomic_base& operator=(const __atomic_base&) = delete;
354 __atomic_base& operator=(const __atomic_base&) volatile = delete;
355
356 // Requires __int_type convertible to _M_i.
357 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
358
359 operator __int_type() const noexcept
360 { return load(); }
361
362 operator __int_type() const volatile noexcept
363 { return load(); }
364
365 __int_type
366 operator=(__int_type __i) noexcept
367 {
368 store(__i);
369 return __i;
370 }
371
372 __int_type
373 operator=(__int_type __i) volatile noexcept
374 {
375 store(__i);
376 return __i;
377 }
378
379 __int_type
380 operator++(int) noexcept
381 { return fetch_add(1); }
382
383 __int_type
384 operator++(int) volatile noexcept
385 { return fetch_add(1); }
386
387 __int_type
388 operator--(int) noexcept
389 { return fetch_sub(1); }
390
391 __int_type
392 operator--(int) volatile noexcept
393 { return fetch_sub(1); }
394
395 __int_type
396 operator++() noexcept
397 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
398
399 __int_type
400 operator++() volatile noexcept
401 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
402
403 __int_type
404 operator--() noexcept
405 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
406
407 __int_type
408 operator--() volatile noexcept
409 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
410
411 __int_type
412 operator+=(__int_type __i) noexcept
413 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
414
415 __int_type
416 operator+=(__int_type __i) volatile noexcept
417 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
418
419 __int_type
420 operator-=(__int_type __i) noexcept
421 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
422
423 __int_type
424 operator-=(__int_type __i) volatile noexcept
425 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
426
427 __int_type
428 operator&=(__int_type __i) noexcept
429 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
430
431 __int_type
432 operator&=(__int_type __i) volatile noexcept
433 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
434
435 __int_type
436 operator|=(__int_type __i) noexcept
437 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
438
439 __int_type
440 operator|=(__int_type __i) volatile noexcept
441 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
442
443 __int_type
444 operator^=(__int_type __i) noexcept
445 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
446
447 __int_type
448 operator^=(__int_type __i) volatile noexcept
449 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
450
451 bool
452 is_lock_free() const noexcept
453 {
454 // Use a fake, minimally aligned pointer.
455 return __atomic_is_lock_free(sizeof(_M_i),
456 reinterpret_cast<void *>(-_S_alignment));
457 }
458
459 bool
460 is_lock_free() const volatile noexcept
461 {
462 // Use a fake, minimally aligned pointer.
463 return __atomic_is_lock_free(sizeof(_M_i),
464 reinterpret_cast<void *>(-_S_alignment));
465 }
466
467 _GLIBCXX_ALWAYS_INLINE void
468 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
469 {
470 memory_order __b __attribute__ ((__unused__))
471 = __m & __memory_order_mask;
472 __glibcxx_assert(__b != memory_order_acquire);
473 __glibcxx_assert(__b != memory_order_acq_rel);
474 __glibcxx_assert(__b != memory_order_consume);
475
476 __atomic_store_n(&_M_i, __i, int(__m));
477 }
478
479 _GLIBCXX_ALWAYS_INLINE void
480 store(__int_type __i,
481 memory_order __m = memory_order_seq_cst) volatile noexcept
482 {
483 memory_order __b __attribute__ ((__unused__))
484 = __m & __memory_order_mask;
485 __glibcxx_assert(__b != memory_order_acquire);
486 __glibcxx_assert(__b != memory_order_acq_rel);
487 __glibcxx_assert(__b != memory_order_consume);
488
489 __atomic_store_n(&_M_i, __i, int(__m));
490 }
491
492 _GLIBCXX_ALWAYS_INLINE __int_type
493 load(memory_order __m = memory_order_seq_cst) const noexcept
494 {
495 memory_order __b __attribute__ ((__unused__))
496 = __m & __memory_order_mask;
497 __glibcxx_assert(__b != memory_order_release);
498 __glibcxx_assert(__b != memory_order_acq_rel);
499
500 return __atomic_load_n(&_M_i, int(__m));
501 }
502
503 _GLIBCXX_ALWAYS_INLINE __int_type
504 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
505 {
506 memory_order __b __attribute__ ((__unused__))
507 = __m & __memory_order_mask;
508 __glibcxx_assert(__b != memory_order_release);
509 __glibcxx_assert(__b != memory_order_acq_rel);
510
511 return __atomic_load_n(&_M_i, int(__m));
512 }
513
514 _GLIBCXX_ALWAYS_INLINE __int_type
515 exchange(__int_type __i,
516 memory_order __m = memory_order_seq_cst) noexcept
517 {
518 return __atomic_exchange_n(&_M_i, __i, int(__m));
519 }
520
521
522 _GLIBCXX_ALWAYS_INLINE __int_type
523 exchange(__int_type __i,
524 memory_order __m = memory_order_seq_cst) volatile noexcept
525 {
526 return __atomic_exchange_n(&_M_i, __i, int(__m));
527 }
528
529 _GLIBCXX_ALWAYS_INLINE bool
530 compare_exchange_weak(__int_type& __i1, __int_type __i2,
531 memory_order __m1, memory_order __m2) noexcept
532 {
533 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
534
535 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
536 int(__m1), int(__m2));
537 }
538
539 _GLIBCXX_ALWAYS_INLINE bool
540 compare_exchange_weak(__int_type& __i1, __int_type __i2,
541 memory_order __m1,
542 memory_order __m2) volatile noexcept
543 {
544 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
545
546 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
547 int(__m1), int(__m2));
548 }
549
550 _GLIBCXX_ALWAYS_INLINE bool
551 compare_exchange_weak(__int_type& __i1, __int_type __i2,
552 memory_order __m = memory_order_seq_cst) noexcept
553 {
554 return compare_exchange_weak(__i1, __i2, __m,
555 __cmpexch_failure_order(__m));
556 }
557
558 _GLIBCXX_ALWAYS_INLINE bool
559 compare_exchange_weak(__int_type& __i1, __int_type __i2,
560 memory_order __m = memory_order_seq_cst) volatile noexcept
561 {
562 return compare_exchange_weak(__i1, __i2, __m,
563 __cmpexch_failure_order(__m));
564 }
565
566 _GLIBCXX_ALWAYS_INLINE bool
567 compare_exchange_strong(__int_type& __i1, __int_type __i2,
568 memory_order __m1, memory_order __m2) noexcept
569 {
570 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
571
572 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
573 int(__m1), int(__m2));
574 }
575
576 _GLIBCXX_ALWAYS_INLINE bool
577 compare_exchange_strong(__int_type& __i1, __int_type __i2,
578 memory_order __m1,
579 memory_order __m2) volatile noexcept
580 {
581 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
582
583 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
584 int(__m1), int(__m2));
585 }
586
587 _GLIBCXX_ALWAYS_INLINE bool
588 compare_exchange_strong(__int_type& __i1, __int_type __i2,
589 memory_order __m = memory_order_seq_cst) noexcept
590 {
591 return compare_exchange_strong(__i1, __i2, __m,
592 __cmpexch_failure_order(__m));
593 }
594
595 _GLIBCXX_ALWAYS_INLINE bool
596 compare_exchange_strong(__int_type& __i1, __int_type __i2,
597 memory_order __m = memory_order_seq_cst) volatile noexcept
598 {
599 return compare_exchange_strong(__i1, __i2, __m,
600 __cmpexch_failure_order(__m));
601 }
602
603#if __glibcxx_atomic_wait
604 _GLIBCXX_ALWAYS_INLINE void
605 wait(__int_type __old,
606 memory_order __m = memory_order_seq_cst) const noexcept
607 {
608 std::__atomic_wait_address_v(&_M_i, __old,
609 [__m, this] { return this->load(__m); });
610 }
611
612 // TODO add const volatile overload
613
614 _GLIBCXX_ALWAYS_INLINE void
615 notify_one() noexcept
616 { std::__atomic_notify_address(&_M_i, false); }
617
618 // TODO add const volatile overload
619
620 _GLIBCXX_ALWAYS_INLINE void
621 notify_all() noexcept
622 { std::__atomic_notify_address(&_M_i, true); }
623
624 // TODO add const volatile overload
625#endif // __glibcxx_atomic_wait
626
627 _GLIBCXX_ALWAYS_INLINE __int_type
628 fetch_add(__int_type __i,
629 memory_order __m = memory_order_seq_cst) noexcept
630 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
631
632 _GLIBCXX_ALWAYS_INLINE __int_type
633 fetch_add(__int_type __i,
634 memory_order __m = memory_order_seq_cst) volatile noexcept
635 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
636
637 _GLIBCXX_ALWAYS_INLINE __int_type
638 fetch_sub(__int_type __i,
639 memory_order __m = memory_order_seq_cst) noexcept
640 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
641
642 _GLIBCXX_ALWAYS_INLINE __int_type
643 fetch_sub(__int_type __i,
644 memory_order __m = memory_order_seq_cst) volatile noexcept
645 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
646
647 _GLIBCXX_ALWAYS_INLINE __int_type
648 fetch_and(__int_type __i,
649 memory_order __m = memory_order_seq_cst) noexcept
650 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
651
652 _GLIBCXX_ALWAYS_INLINE __int_type
653 fetch_and(__int_type __i,
654 memory_order __m = memory_order_seq_cst) volatile noexcept
655 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
656
657 _GLIBCXX_ALWAYS_INLINE __int_type
658 fetch_or(__int_type __i,
659 memory_order __m = memory_order_seq_cst) noexcept
660 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
661
662 _GLIBCXX_ALWAYS_INLINE __int_type
663 fetch_or(__int_type __i,
664 memory_order __m = memory_order_seq_cst) volatile noexcept
665 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
666
667 _GLIBCXX_ALWAYS_INLINE __int_type
668 fetch_xor(__int_type __i,
669 memory_order __m = memory_order_seq_cst) noexcept
670 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
671
672 _GLIBCXX_ALWAYS_INLINE __int_type
673 fetch_xor(__int_type __i,
674 memory_order __m = memory_order_seq_cst) volatile noexcept
675 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
676 };
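  // [Editorial example, not part of the upstream header] The integral
  // std::atomic specialisations are implemented in terms of this base, so the
  // members above are what user code ultimately reaches, e.g.:
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);  // __atomic_fetch_add
  //   int expected = counter.load();
  //   // CAS loop: on failure, 'expected' is refreshed with the stored value.
  //   while (!counter.compare_exchange_weak(expected, expected * 2))
  //     { /* retry; the desired value is recomputed from 'expected' */ }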
677
678
679 /// Partial specialization for pointer types.
680 template<typename _PTp>
681 struct __atomic_base<_PTp*>
682 {
683 private:
684 typedef _PTp* __pointer_type;
685
686 __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
687
688 // Factored out to facilitate explicit specialization.
689 constexpr ptrdiff_t
690 _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
691
692 constexpr ptrdiff_t
693 _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
694
695 public:
696 __atomic_base() noexcept = default;
697 ~__atomic_base() noexcept = default;
698 __atomic_base(const __atomic_base&) = delete;
699 __atomic_base& operator=(const __atomic_base&) = delete;
700 __atomic_base& operator=(const __atomic_base&) volatile = delete;
701
702 // Requires __pointer_type convertible to _M_p.
703 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
704
705 operator __pointer_type() const noexcept
706 { return load(); }
707
708 operator __pointer_type() const volatile noexcept
709 { return load(); }
710
711 __pointer_type
712 operator=(__pointer_type __p) noexcept
713 {
714 store(__p);
715 return __p;
716 }
717
718 __pointer_type
719 operator=(__pointer_type __p) volatile noexcept
720 {
721 store(__p);
722 return __p;
723 }
724
725 __pointer_type
726 operator++(int) noexcept
727 { return fetch_add(1); }
728
729 __pointer_type
730 operator++(int) volatile noexcept
731 { return fetch_add(1); }
732
733 __pointer_type
734 operator--(int) noexcept
735 { return fetch_sub(1); }
736
737 __pointer_type
738 operator--(int) volatile noexcept
739 { return fetch_sub(1); }
740
741 __pointer_type
742 operator++() noexcept
743 { return __atomic_add_fetch(&_M_p, _M_type_size(1),
744 int(memory_order_seq_cst)); }
745
746 __pointer_type
747 operator++() volatile noexcept
748 { return __atomic_add_fetch(&_M_p, _M_type_size(1),
749 int(memory_order_seq_cst)); }
750
751 __pointer_type
752 operator--() noexcept
753 { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
754 int(memory_order_seq_cst)); }
755
756 __pointer_type
757 operator--() volatile noexcept
758 { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
759 int(memory_order_seq_cst)); }
760
761 __pointer_type
762 operator+=(ptrdiff_t __d) noexcept
763 { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
764 int(memory_order_seq_cst)); }
765
766 __pointer_type
767 operator+=(ptrdiff_t __d) volatile noexcept
768 { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
769 int(memory_order_seq_cst)); }
770
771 __pointer_type
772 operator-=(ptrdiff_t __d) noexcept
773 { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
774 int(memory_order_seq_cst)); }
775
776 __pointer_type
777 operator-=(ptrdiff_t __d) volatile noexcept
778 { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
779 int(memory_order_seq_cst)); }
780
781 bool
782 is_lock_free() const noexcept
783 {
784 // Produce a fake, minimally aligned pointer.
785 return __atomic_is_lock_free(sizeof(_M_p),
786 reinterpret_cast<void *>(-__alignof(_M_p)));
787 }
788
789 bool
790 is_lock_free() const volatile noexcept
791 {
792 // Produce a fake, minimally aligned pointer.
793 return __atomic_is_lock_free(sizeof(_M_p),
794 reinterpret_cast<void *>(-__alignof(_M_p)));
795 }
796
797 _GLIBCXX_ALWAYS_INLINE void
798 store(__pointer_type __p,
799 memory_order __m = memory_order_seq_cst) noexcept
800 {
801 memory_order __b __attribute__ ((__unused__))
802 = __m & __memory_order_mask;
803
804 __glibcxx_assert(__b != memory_order_acquire);
805 __glibcxx_assert(__b != memory_order_acq_rel);
806 __glibcxx_assert(__b != memory_order_consume);
807
808 __atomic_store_n(&_M_p, __p, int(__m));
809 }
810
811 _GLIBCXX_ALWAYS_INLINE void
812 store(__pointer_type __p,
813 memory_order __m = memory_order_seq_cst) volatile noexcept
814 {
815 memory_order __b __attribute__ ((__unused__))
816 = __m & __memory_order_mask;
817 __glibcxx_assert(__b != memory_order_acquire);
818 __glibcxx_assert(__b != memory_order_acq_rel);
819 __glibcxx_assert(__b != memory_order_consume);
820
821 __atomic_store_n(&_M_p, __p, int(__m));
822 }
823
824 _GLIBCXX_ALWAYS_INLINE __pointer_type
825 load(memory_order __m = memory_order_seq_cst) const noexcept
826 {
827 memory_order __b __attribute__ ((__unused__))
828 = __m & __memory_order_mask;
829 __glibcxx_assert(__b != memory_order_release);
830 __glibcxx_assert(__b != memory_order_acq_rel);
831
832 return __atomic_load_n(&_M_p, int(__m));
833 }
834
835 _GLIBCXX_ALWAYS_INLINE __pointer_type
836 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
837 {
838 memory_order __b __attribute__ ((__unused__))
839 = __m & __memory_order_mask;
840 __glibcxx_assert(__b != memory_order_release);
841 __glibcxx_assert(__b != memory_order_acq_rel);
842
843 return __atomic_load_n(&_M_p, int(__m));
844 }
845
846 _GLIBCXX_ALWAYS_INLINE __pointer_type
847 exchange(__pointer_type __p,
848 memory_order __m = memory_order_seq_cst) noexcept
849 {
850 return __atomic_exchange_n(&_M_p, __p, int(__m));
851 }
852
853
854 _GLIBCXX_ALWAYS_INLINE __pointer_type
855 exchange(__pointer_type __p,
856 memory_order __m = memory_order_seq_cst) volatile noexcept
857 {
858 return __atomic_exchange_n(&_M_p, __p, int(__m));
859 }
860
861 _GLIBCXX_ALWAYS_INLINE bool
862 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
863 memory_order __m1,
864 memory_order __m2) noexcept
865 {
866 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
867
868 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
869 int(__m1), int(__m2));
870 }
871
872 _GLIBCXX_ALWAYS_INLINE bool
873 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
874 memory_order __m1,
875 memory_order __m2) volatile noexcept
876 {
877 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
878
879 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
880 int(__m1), int(__m2));
881 }
882
883 _GLIBCXX_ALWAYS_INLINE bool
884 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
885 memory_order __m1,
886 memory_order __m2) noexcept
887 {
888 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
889
890 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
891 int(__m1), int(__m2));
892 }
893
894 _GLIBCXX_ALWAYS_INLINE bool
895 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
896 memory_order __m1,
897 memory_order __m2) volatile noexcept
898 {
899 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
900
901 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
902 int(__m1), int(__m2));
903 }
904
905#if __glibcxx_atomic_wait
906 _GLIBCXX_ALWAYS_INLINE void
907 wait(__pointer_type __old,
908 memory_order __m = memory_order_seq_cst) const noexcept
909 {
910 std::__atomic_wait_address_v(&_M_p, __old,
911 [__m, this]
912 { return this->load(__m); });
913 }
914
915 // TODO add const volatile overload
916
917 _GLIBCXX_ALWAYS_INLINE void
918 notify_one() const noexcept
919 { std::__atomic_notify_address(&_M_p, false); }
920
921 // TODO add const volatile overload
922
923 _GLIBCXX_ALWAYS_INLINE void
924 notify_all() const noexcept
925 { std::__atomic_notify_address(&_M_p, true); }
926
927 // TODO add const volatile overload
928#endif // __glibcxx_atomic_wait
929
930 _GLIBCXX_ALWAYS_INLINE __pointer_type
931 fetch_add(ptrdiff_t __d,
932 memory_order __m = memory_order_seq_cst) noexcept
933 { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
934
935 _GLIBCXX_ALWAYS_INLINE __pointer_type
936 fetch_add(ptrdiff_t __d,
937 memory_order __m = memory_order_seq_cst) volatile noexcept
938 { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
939
940 _GLIBCXX_ALWAYS_INLINE __pointer_type
941 fetch_sub(ptrdiff_t __d,
942 memory_order __m = memory_order_seq_cst) noexcept
943 { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
944
945 _GLIBCXX_ALWAYS_INLINE __pointer_type
946 fetch_sub(ptrdiff_t __d,
947 memory_order __m = memory_order_seq_cst) volatile noexcept
948 { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
949 };
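  // [Editorial note, not part of the upstream header] _M_type_size scales the
  // distance by sizeof(_PTp) before it reaches the built-in, which does plain
  // byte-wise address arithmetic; the net effect is ordinary pointer
  // arithmetic, e.g.:
  //
  //   int buf[4];
  //   std::atomic<int*> p{buf};
  //   p.fetch_add(1);   // advances by sizeof(int) bytes, i.e. to &buf[1]
  //   p += 2;           // now &buf[3]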
950
951 namespace __atomic_impl
952 {
953 // Implementation details of atomic padding handling
954
955 template<typename _Tp>
956 constexpr bool
957 __maybe_has_padding()
958 {
959#if ! __has_builtin(__builtin_clear_padding)
960 return false;
961#elif __has_builtin(__has_unique_object_representations)
962 return !__has_unique_object_representations(_Tp)
963 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
964#else
965 return true;
966#endif
967 }
968
969 template<typename _Tp>
970 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
971 __clear_padding(_Tp& __val) noexcept
972 {
973 auto* __ptr = std::__addressof(__val);
974#if __has_builtin(__builtin_clear_padding)
975 if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
976 __builtin_clear_padding(__ptr);
977#endif
978 return __ptr;
979 }
980
981 // Remove volatile and create a non-deduced context for value arguments.
982 template<typename _Tp>
983 using _Val = typename remove_volatile<_Tp>::type;
984
985#pragma GCC diagnostic push
986#pragma GCC diagnostic ignored "-Wc++17-extensions"
987
988 template<bool _AtomicRef = false, typename _Tp>
989 _GLIBCXX_ALWAYS_INLINE bool
990 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
991 bool __is_weak,
992 memory_order __s, memory_order __f) noexcept
993 {
994 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
995
996 using _Vp = _Val<_Tp>;
997 _Tp* const __pval = std::__addressof(__val);
998
999 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
1000 {
1001 return __atomic_compare_exchange(__pval, std::__addressof(__e),
1002 std::__addressof(__i), __is_weak,
1003 int(__s), int(__f));
1004 }
1005 else if constexpr (!_AtomicRef) // std::atomic<T>
1006 {
1007 // Clear padding of the value we want to set:
1008 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1009 // Only allowed to modify __e on failure, so make a copy:
1010 _Vp __exp = __e;
1011 // Clear padding of the expected value:
1012 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1013
1014 // For std::atomic<T> we know that the contained value will already
1015 // have zeroed padding, so trivial memcmp semantics are OK.
1016 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1017 __is_weak, int(__s), int(__f)))
1018 return true;
1019 // Value bits must be different, copy from __exp back to __e:
1020 __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1021 return false;
1022 }
1023 else // std::atomic_ref<T> where T has padding bits.
1024 {
1025 // Clear padding of the value we want to set:
1026 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1027
1028 // Only allowed to modify __e on failure, so make a copy:
1029 _Vp __exp = __e;
1030 // Optimistically assume that a previous store had zeroed padding
1031 // so that zeroing it in the expected value will match first time.
1032 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1033
1034 // compare_exchange is specified to compare value representations.
1035 // Need to check whether a failure is 'real' or just due to
1036 // differences in padding bits. This loop should run no more than
1037 // three times, because the worst case scenario is:
1038 // First CAS fails because the actual value has non-zero padding.
1039 // Second CAS fails because another thread stored the same value,
1040 // but now with padding cleared. Third CAS succeeds.
1041 // We will never need to loop a fourth time, because any value
1042 // written by another thread (whether via store, exchange or
1043 // compare_exchange) will have had its padding cleared.
1044 while (true)
1045 {
1046 // Copy of the expected value so we can clear its padding.
1047 _Vp __orig = __exp;
1048
1049 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1050 __is_weak, int(__s), int(__f)))
1051 return true;
1052
1053 // Copy of the actual value so we can clear its padding.
1054 _Vp __curr = __exp;
1055
1056 // Compare value representations (i.e. ignoring padding).
1057 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1058 __atomic_impl::__clear_padding(__curr),
1059 sizeof(_Vp)))
1060 {
1061 // Value representations compare unequal, real failure.
1062 __builtin_memcpy(std::__addressof(__e), __pexp,
1063 sizeof(_Vp));
1064 return false;
1065 }
1066 }
1067 }
1068 }
1069#pragma GCC diagnostic pop
1070 } // namespace __atomic_impl
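  // [Editorial note, not part of the upstream header] The padding handling
  // above only matters for types whose object representation contains bits
  // that do not participate in the value, for example (hypothetical, typical
  // ABI):
  //
  //   struct S { char c; /* 3 padding bytes */ int i; };
  //
  // For such a type, __builtin_clear_padding zeroes the padding bytes of both
  // the expected and desired values so that the byte-wise comparison done by
  // __atomic_compare_exchange cannot fail merely because padding differs.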
1071
1072#if __cplusplus > 201703L
1073 // Implementation details of atomic_ref and atomic<floating-point>.
1074 namespace __atomic_impl
1075 {
1076 // Like _Val<T> above, but for difference_type arguments.
1077 template<typename _Tp>
1078 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1079
1080 template<size_t _Size, size_t _Align>
1081 _GLIBCXX_ALWAYS_INLINE bool
1082 is_lock_free() noexcept
1083 {
1084 // Produce a fake, minimally aligned pointer.
1085 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1086 }
1087
1088 template<typename _Tp>
1089 _GLIBCXX_ALWAYS_INLINE void
1090 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1091 {
1092 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1093 }
1094
1095 template<typename _Tp>
1096 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1097 load(const _Tp* __ptr, memory_order __m) noexcept
1098 {
1099 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1100 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1101 __atomic_load(__ptr, __dest, int(__m));
1102 return *__dest;
1103 }
1104
1105 template<typename _Tp>
1106 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1107 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1108 {
1109 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1110 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1111 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1112 __dest, int(__m));
1113 return *__dest;
1114 }
1115
1116 template<bool _AtomicRef = false, typename _Tp>
1117 _GLIBCXX_ALWAYS_INLINE bool
1118 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1119 _Val<_Tp> __desired, memory_order __success,
1120 memory_order __failure) noexcept
1121 {
1122 return __atomic_impl::__compare_exchange<_AtomicRef>(
1123 *__ptr, __expected, __desired, true, __success, __failure);
1124 }
1125
1126 template<bool _AtomicRef = false, typename _Tp>
1127 _GLIBCXX_ALWAYS_INLINE bool
1128 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1129 _Val<_Tp> __desired, memory_order __success,
1130 memory_order __failure) noexcept
1131 {
1132 return __atomic_impl::__compare_exchange<_AtomicRef>(
1133 *__ptr, __expected, __desired, false, __success, __failure);
1134 }
1135
1136#if __glibcxx_atomic_wait
1137 template<typename _Tp>
1138 _GLIBCXX_ALWAYS_INLINE void
1139 wait(const _Tp* __ptr, _Val<_Tp> __old,
1140 memory_order __m = memory_order_seq_cst) noexcept
1141 {
1142 std::__atomic_wait_address_v(__ptr, __old,
1143 [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1144 }
1145
1146 // TODO add const volatile overload
1147
1148 template<typename _Tp>
1149 _GLIBCXX_ALWAYS_INLINE void
1150 notify_one(const _Tp* __ptr) noexcept
1151 { std::__atomic_notify_address(__ptr, false); }
1152
1153 // TODO add const volatile overload
1154
1155 template<typename _Tp>
1156 _GLIBCXX_ALWAYS_INLINE void
1157 notify_all(const _Tp* __ptr) noexcept
1158 { std::__atomic_notify_address(__ptr, true); }
1159
1160 // TODO add const volatile overload
1161#endif // __glibcxx_atomic_wait
1162
1163 template<typename _Tp>
1164 _GLIBCXX_ALWAYS_INLINE _Tp
1165 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1166 { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1167
1168 template<typename _Tp>
1169 _GLIBCXX_ALWAYS_INLINE _Tp
1170 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1171 { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1172
1173 template<typename _Tp>
1174 _GLIBCXX_ALWAYS_INLINE _Tp
1175 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1176 { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1177
1178 template<typename _Tp>
1179 _GLIBCXX_ALWAYS_INLINE _Tp
1180 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1181 { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1182
1183 template<typename _Tp>
1184 _GLIBCXX_ALWAYS_INLINE _Tp
1185 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1186 { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1187
1188 template<typename _Tp>
1189 _GLIBCXX_ALWAYS_INLINE _Tp
1190 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1191 { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1192
1193 template<typename _Tp>
1194 _GLIBCXX_ALWAYS_INLINE _Tp
1195 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1196 { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1197
1198 template<typename _Tp>
1199 _GLIBCXX_ALWAYS_INLINE _Tp
1200 __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1201 { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1202
1203 template<typename _Tp>
1204 _GLIBCXX_ALWAYS_INLINE _Tp
1205 __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1206 { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1207
1208 template<typename _Tp>
1209 _GLIBCXX_ALWAYS_INLINE _Tp
1210 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1211 { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1212
1213 template<typename _Tp>
1214 _Tp
1215 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1216 {
1217 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1218 _Val<_Tp> __newval = __oldval + __i;
1219 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1220 memory_order_relaxed))
1221 __newval = __oldval + __i;
1222 return __oldval;
1223 }
1224
1225 template<typename _Tp>
1226 _Tp
1227 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1228 {
1229 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1230 _Val<_Tp> __newval = __oldval - __i;
1231 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1232 memory_order_relaxed))
1233 __newval = __oldval - __i;
1234 return __oldval;
1235 }
1236
1237 template<typename _Tp>
1238 _Tp
1239 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1240 {
1241 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1242 _Val<_Tp> __newval = __oldval + __i;
1243 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1244 memory_order_seq_cst,
1245 memory_order_relaxed))
1246 __newval = __oldval + __i;
1247 return __newval;
1248 }
1249
1250 template<typename _Tp>
1251 _Tp
1252 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1253 {
1254 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1255 _Val<_Tp> __newval = __oldval - __i;
1256 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1257 memory_order_seq_cst,
1258 memory_order_relaxed))
1259 __newval = __oldval - __i;
1260 return __newval;
1261 }
1262 } // namespace __atomic_impl
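  // [Editorial note, not part of the upstream header] The GCC read-modify-write
  // built-ins do not accept floating-point operands, so __fetch_add_flt and
  // friends fall back to a compare-exchange loop: compare_exchange_weak writes
  // the currently stored value back into __oldval on failure, so each retry
  // recomputes __newval from fresh data. A rough user-level equivalent:
  //
  //   std::atomic<double> d{0.0};
  //   double old = d.load(std::memory_order_relaxed);
  //   while (!d.compare_exchange_weak(old, old + 1.0))
  //     { /* 'old' now holds the latest value; the loop recomputes old + 1.0 */ }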
1263
1264 // base class for atomic<floating-point-type>
1265 template<typename _Fp>
1266 struct __atomic_float
1267 {
1268 static_assert(is_floating_point_v<_Fp>);
1269
1270 static constexpr size_t _S_alignment = __alignof__(_Fp);
1271
1272 public:
1273 using value_type = _Fp;
1274 using difference_type = value_type;
1275
1276 static constexpr bool is_always_lock_free
1277 = __atomic_always_lock_free(sizeof(_Fp), 0);
1278
1279 __atomic_float() = default;
1280
1281 constexpr
1282 __atomic_float(_Fp __t) : _M_fp(__t)
1283 { __atomic_impl::__clear_padding(_M_fp); }
1284
1285 __atomic_float(const __atomic_float&) = delete;
1286 __atomic_float& operator=(const __atomic_float&) = delete;
1287 __atomic_float& operator=(const __atomic_float&) volatile = delete;
1288
1289 _Fp
1290 operator=(_Fp __t) volatile noexcept
1291 {
1292 this->store(__t);
1293 return __t;
1294 }
1295
1296 _Fp
1297 operator=(_Fp __t) noexcept
1298 {
1299 this->store(__t);
1300 return __t;
1301 }
1302
1303 bool
1304 is_lock_free() const volatile noexcept
1305 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1306
1307 bool
1308 is_lock_free() const noexcept
1309 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1310
1311 void
1312 store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1313 { __atomic_impl::store(&_M_fp, __t, __m); }
1314
1315 void
1316 store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1317 { __atomic_impl::store(&_M_fp, __t, __m); }
1318
1319 _Fp
1320 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1321 { return __atomic_impl::load(&_M_fp, __m); }
1322
1323 _Fp
1324 load(memory_order __m = memory_order_seq_cst) const noexcept
1325 { return __atomic_impl::load(&_M_fp, __m); }
1326
1327 operator _Fp() const volatile noexcept { return this->load(); }
1328 operator _Fp() const noexcept { return this->load(); }
1329
1330 _Fp
1331 exchange(_Fp __desired,
1332 memory_order __m = memory_order_seq_cst) volatile noexcept
1333 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1334
1335 _Fp
1336 exchange(_Fp __desired,
1337 memory_order __m = memory_order_seq_cst) noexcept
1338 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1339
1340 bool
1341 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1342 memory_order __success,
1343 memory_order __failure) noexcept
1344 {
1345 return __atomic_impl::compare_exchange_weak(&_M_fp,
1346 __expected, __desired,
1347 __success, __failure);
1348 }
1349
1350 bool
1351 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1352 memory_order __success,
1353 memory_order __failure) volatile noexcept
1354 {
1355 return __atomic_impl::compare_exchange_weak(&_M_fp,
1356 __expected, __desired,
1357 __success, __failure);
1358 }
1359
1360 bool
1361 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1362 memory_order __success,
1363 memory_order __failure) noexcept
1364 {
1365 return __atomic_impl::compare_exchange_strong(&_M_fp,
1366 __expected, __desired,
1367 __success, __failure);
1368 }
1369
1370 bool
1371 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1372 memory_order __success,
1373 memory_order __failure) volatile noexcept
1374 {
1375 return __atomic_impl::compare_exchange_strong(&_M_fp,
1376 __expected, __desired,
1377 __success, __failure);
1378 }
1379
1380 bool
1381 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1382 memory_order __order = memory_order_seq_cst)
1383 noexcept
1384 {
1385 return compare_exchange_weak(__expected, __desired, __order,
1386 __cmpexch_failure_order(__order));
1387 }
1388
1389 bool
1390 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1391 memory_order __order = memory_order_seq_cst)
1392 volatile noexcept
1393 {
1394 return compare_exchange_weak(__expected, __desired, __order,
1395 __cmpexch_failure_order(__order));
1396 }
1397
1398 bool
1399 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1400 memory_order __order = memory_order_seq_cst)
1401 noexcept
1402 {
1403 return compare_exchange_strong(__expected, __desired, __order,
1404 __cmpexch_failure_order(__order));
1405 }
1406
1407 bool
1408 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1409 memory_order __order = memory_order_seq_cst)
1410 volatile noexcept
1411 {
1412 return compare_exchange_strong(__expected, __desired, __order,
1413 __cmpexch_failure_order(__order));
1414 }
1415
1416#if __glibcxx_atomic_wait
1417 _GLIBCXX_ALWAYS_INLINE void
1418 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1419 { __atomic_impl::wait(&_M_fp, __old, __m); }
1420
1421 // TODO add const volatile overload
1422
1423 _GLIBCXX_ALWAYS_INLINE void
1424 notify_one() const noexcept
1425 { __atomic_impl::notify_one(&_M_fp); }
1426
1427 // TODO add const volatile overload
1428
1429 _GLIBCXX_ALWAYS_INLINE void
1430 notify_all() const noexcept
1431 { __atomic_impl::notify_all(&_M_fp); }
1432
1433 // TODO add const volatile overload
1434#endif // __glibcxx_atomic_wait
1435
1436 value_type
1437 fetch_add(value_type __i,
1438 memory_order __m = memory_order_seq_cst) noexcept
1439 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1440
1441 value_type
1442 fetch_add(value_type __i,
1443 memory_order __m = memory_order_seq_cst) volatile noexcept
1444 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1445
1446 value_type
1447 fetch_sub(value_type __i,
1448 memory_order __m = memory_order_seq_cst) noexcept
1449 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1450
1451 value_type
1452 fetch_sub(value_type __i,
1453 memory_order __m = memory_order_seq_cst) volatile noexcept
1454 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1455
1456 value_type
1457 operator+=(value_type __i) noexcept
1458 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1459
1460 value_type
1461 operator+=(value_type __i) volatile noexcept
1462 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1463
1464 value_type
1465 operator-=(value_type __i) noexcept
1466 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1467
1468 value_type
1469 operator-=(value_type __i) volatile noexcept
1470 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1471
1472 private:
1473 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1474 };
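  // [Editorial note, not part of the upstream header] std::atomic<float>,
  // std::atomic<double> and std::atomic<long double> build on this base.
  // Note that compare_exchange_* compares value representations (bitwise),
  // not floating-point values: expecting +0.0 does not match a stored -0.0,
  // and a stored NaN can only be matched by an expected NaN with the same
  // bit pattern, e.g.:
  //
  //   std::atomic<double> a{-0.0};
  //   double expected = +0.0;
  //   bool ok = a.compare_exchange_strong(expected, 1.0); // false, bits differ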
1475#undef _GLIBCXX20_INIT
1476
1477 template<typename _Tp,
1478 bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
1479 bool = is_floating_point_v<_Tp>>
1480 struct __atomic_ref;
1481
1482 // base class for non-integral, non-floating-point, non-pointer types
1483 template<typename _Tp>
1484 struct __atomic_ref<_Tp, false, false>
1485 {
1486 static_assert(is_trivially_copyable_v<_Tp>);
1487
1488 // 1/2/4/8/16-byte types must be aligned to at least their size.
1489 static constexpr int _S_min_alignment
1490 = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1491 ? 0 : sizeof(_Tp);
1492
1493 public:
1494 using value_type = _Tp;
1495
1496 static constexpr bool is_always_lock_free
1497 = __atomic_always_lock_free(sizeof(_Tp), 0);
1498
1499 static constexpr size_t required_alignment
1500 = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1501
1502 __atomic_ref& operator=(const __atomic_ref&) = delete;
1503
1504 explicit
1505 __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1506 {
1507 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1508 }
1509
1510 __atomic_ref(const __atomic_ref&) noexcept = default;
1511
1512 _Tp
1513 operator=(_Tp __t) const noexcept
1514 {
1515 this->store(__t);
1516 return __t;
1517 }
1518
1519 operator _Tp() const noexcept { return this->load(); }
1520
1521 bool
1522 is_lock_free() const noexcept
1523 { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1524
1525 void
1526 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1527 { __atomic_impl::store(_M_ptr, __t, __m); }
1528
1529 _Tp
1530 load(memory_order __m = memory_order_seq_cst) const noexcept
1531 { return __atomic_impl::load(_M_ptr, __m); }
1532
1533 _Tp
1534 exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1535 const noexcept
1536 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1537
1538 bool
1539 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1540 memory_order __success,
1541 memory_order __failure) const noexcept
1542 {
1543 return __atomic_impl::compare_exchange_weak<true>(
1544 _M_ptr, __expected, __desired, __success, __failure);
1545 }
1546
1547 bool
1548 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1549 memory_order __success,
1550 memory_order __failure) const noexcept
1551 {
1552 return __atomic_impl::compare_exchange_strong<true>(
1553 _M_ptr, __expected, __desired, __success, __failure);
1554 }
1555
1556 bool
1557 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1558 memory_order __order = memory_order_seq_cst)
1559 const noexcept
1560 {
1561 return compare_exchange_weak(__expected, __desired, __order,
1562 __cmpexch_failure_order(__order));
1563 }
1564
1565 bool
1566 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1567 memory_order __order = memory_order_seq_cst)
1568 const noexcept
1569 {
1570 return compare_exchange_strong(__expected, __desired, __order,
1571 __cmpexch_failure_order(__order));
1572 }
1573
1574#if __glibcxx_atomic_wait
1575 _GLIBCXX_ALWAYS_INLINE void
1576 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1577 { __atomic_impl::wait(_M_ptr, __old, __m); }
1578
1579 // TODO add const volatile overload
1580
1581 _GLIBCXX_ALWAYS_INLINE void
1582 notify_one() const noexcept
1583 { __atomic_impl::notify_one(_M_ptr); }
1584
1585 // TODO add const volatile overload
1586
1587 _GLIBCXX_ALWAYS_INLINE void
1588 notify_all() const noexcept
1589 { __atomic_impl::notify_all(_M_ptr); }
1590
1591 // TODO add const volatile overload
1592#endif // __glibcxx_atomic_wait
1593
1594 private:
1595 _Tp* _M_ptr;
1596 };
1597
1598 // base class for atomic_ref<integral-type>
1599 template<typename _Tp>
1600 struct __atomic_ref<_Tp, true, false>
1601 {
1602 static_assert(is_integral_v<_Tp>);
1603
1604 public:
1605 using value_type = _Tp;
1606 using difference_type = value_type;
1607
1608 static constexpr bool is_always_lock_free
1609 = __atomic_always_lock_free(sizeof(_Tp), 0);
1610
1611 static constexpr size_t required_alignment
1612 = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1613
1614 __atomic_ref() = delete;
1615 __atomic_ref& operator=(const __atomic_ref&) = delete;
1616
1617 explicit
1618 __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1619 {
1620 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1621 }
1622
1623 __atomic_ref(const __atomic_ref&) noexcept = default;
1624
1625 _Tp
1626 operator=(_Tp __t) const noexcept
1627 {
1628 this->store(__t);
1629 return __t;
1630 }
1631
1632 operator _Tp() const noexcept { return this->load(); }
1633
1634 bool
1635 is_lock_free() const noexcept
1636 {
1637 return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1638 }
1639
1640 void
1641 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1642 { __atomic_impl::store(_M_ptr, __t, __m); }
1643
1644 _Tp
1645 load(memory_order __m = memory_order_seq_cst) const noexcept
1646 { return __atomic_impl::load(_M_ptr, __m); }
1647
1648 _Tp
1649 exchange(_Tp __desired,
1650 memory_order __m = memory_order_seq_cst) const noexcept
1651 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1652
1653 bool
1654 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1655 memory_order __success,
1656 memory_order __failure) const noexcept
1657 {
1658 return __atomic_impl::compare_exchange_weak<true>(
1659 _M_ptr, __expected, __desired, __success, __failure);
1660 }
1661
1662 bool
1663 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1664 memory_order __success,
1665 memory_order __failure) const noexcept
1666 {
1667 return __atomic_impl::compare_exchange_strong<true>(
1668 _M_ptr, __expected, __desired, __success, __failure);
1669 }
1670
1671 bool
1672 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1673 memory_order __order = memory_order_seq_cst)
1674 const noexcept
1675 {
1676 return compare_exchange_weak(__expected, __desired, __order,
1677 __cmpexch_failure_order(__order));
1678 }
1679
1680 bool
1681 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1682 memory_order __order = memory_order_seq_cst)
1683 const noexcept
1684 {
1685 return compare_exchange_strong(__expected, __desired, __order,
1686 __cmpexch_failure_order(__order));
1687 }
1688
1689#if __glibcxx_atomic_wait
1690 _GLIBCXX_ALWAYS_INLINE void
1691 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1692 { __atomic_impl::wait(_M_ptr, __old, __m); }
1693
1694 // TODO add const volatile overload
1695
1696 _GLIBCXX_ALWAYS_INLINE void
1697 notify_one() const noexcept
1698 { __atomic_impl::notify_one(_M_ptr); }
1699
1700 // TODO add const volatile overload
1701
1702 _GLIBCXX_ALWAYS_INLINE void
1703 notify_all() const noexcept
1704 { __atomic_impl::notify_all(_M_ptr); }
1705
1706 // TODO add const volatile overload
1707#endif // __glibcxx_atomic_wait
1708
1709 value_type
1710 fetch_add(value_type __i,
1711 memory_order __m = memory_order_seq_cst) const noexcept
1712 { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1713
1714 value_type
1715 fetch_sub(value_type __i,
1716 memory_order __m = memory_order_seq_cst) const noexcept
1717 { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1718
1719 value_type
1720 fetch_and(value_type __i,
1721 memory_order __m = memory_order_seq_cst) const noexcept
1722 { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1723
1724 value_type
1725 fetch_or(value_type __i,
1726 memory_order __m = memory_order_seq_cst) const noexcept
1727 { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1728
1729 value_type
1730 fetch_xor(value_type __i,
1731 memory_order __m = memory_order_seq_cst) const noexcept
1732 { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1733
1734 _GLIBCXX_ALWAYS_INLINE value_type
1735 operator++(int) const noexcept
1736 { return fetch_add(1); }
1737
1738 _GLIBCXX_ALWAYS_INLINE value_type
1739 operator--(int) const noexcept
1740 { return fetch_sub(1); }
1741
1742 value_type
1743 operator++() const noexcept
1744 { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1745
1746 value_type
1747 operator--() const noexcept
1748 { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1749
1750 value_type
1751 operator+=(value_type __i) const noexcept
1752 { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1753
1754 value_type
1755 operator-=(value_type __i) const noexcept
1756 { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1757
1758 value_type
1759 operator&=(value_type __i) const noexcept
1760 { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1761
1762 value_type
1763 operator|=(value_type __i) const noexcept
1764 { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1765
1766 value_type
1767 operator^=(value_type __i) const noexcept
1768 { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1769
1770 private:
1771 _Tp* _M_ptr;
1772 };
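  // [Editorial example, not part of the upstream header] std::atomic_ref
  // (C++20) is built on these __atomic_ref bases; a minimal usage sketch:
  //
  //   int plain = 0;                      // must satisfy required_alignment
  //   {
  //     std::atomic_ref<int> ref(plain);  // atomic access while 'ref' exists
  //     ref.fetch_add(5, std::memory_order_relaxed);
  //     ref |= 0x10;
  //   }
  //   // 'plain' may be accessed non-atomically again once no atomic_ref
  //   // to it is outstanding.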
1773
1774 // base class for atomic_ref<floating-point-type>
1775 template<typename _Fp>
1776 struct __atomic_ref<_Fp, false, true>
1777 {
1778 static_assert(is_floating_point_v<_Fp>);
1779
1780 public:
1781 using value_type = _Fp;
1782 using difference_type = value_type;
1783
1784 static constexpr bool is_always_lock_free
1785 = __atomic_always_lock_free(sizeof(_Fp), 0);
1786
1787 static constexpr size_t required_alignment = __alignof__(_Fp);
1788
1789 __atomic_ref() = delete;
1790 __atomic_ref& operator=(const __atomic_ref&) = delete;
1791
1792 explicit
1793 __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1794 {
1795 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1796 }
1797
1798 __atomic_ref(const __atomic_ref&) noexcept = default;
1799
1800 _Fp
1801 operator=(_Fp __t) const noexcept
1802 {
1803 this->store(__t);
1804 return __t;
1805 }
1806
1807 operator _Fp() const noexcept { return this->load(); }
1808
1809 bool
1810 is_lock_free() const noexcept
1811 {
1812 return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1813 }
1814
1815 void
1816 store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1817 { __atomic_impl::store(_M_ptr, __t, __m); }
1818
1819 _Fp
1820 load(memory_order __m = memory_order_seq_cst) const noexcept
1821 { return __atomic_impl::load(_M_ptr, __m); }
1822
1823 _Fp
1824 exchange(_Fp __desired,
1825 memory_order __m = memory_order_seq_cst) const noexcept
1826 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1827
1828 bool
1829 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1830 memory_order __success,
1831 memory_order __failure) const noexcept
1832 {
1833 return __atomic_impl::compare_exchange_weak<true>(
1834 _M_ptr, __expected, __desired, __success, __failure);
1835 }
1836
1837 bool
1838 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1839 memory_order __success,
1840 memory_order __failure) const noexcept
1841 {
1842 return __atomic_impl::compare_exchange_strong<true>(
1843 _M_ptr, __expected, __desired, __success, __failure);
1844 }
1845
1846 bool
1847 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1848 memory_order __order = memory_order_seq_cst)
1849 const noexcept
1850 {
1851 return compare_exchange_weak(__expected, __desired, __order,
1852 __cmpexch_failure_order(__order));
1853 }
1854
1855 bool
1856 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1857 memory_order __order = memory_order_seq_cst)
1858 const noexcept
1859 {
1860 return compare_exchange_strong(__expected, __desired, __order,
1861 __cmpexch_failure_order(__order));
1862 }
1863
1864#if __glibcxx_atomic_wait
1865 _GLIBCXX_ALWAYS_INLINE void
1866 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1867 { __atomic_impl::wait(_M_ptr, __old, __m); }
1868
1869 // TODO add const volatile overload
1870
1871 _GLIBCXX_ALWAYS_INLINE void
1872 notify_one() const noexcept
1873 { __atomic_impl::notify_one(_M_ptr); }
1874
1875 // TODO add const volatile overload
1876
1877 _GLIBCXX_ALWAYS_INLINE void
1878 notify_all() const noexcept
1879 { __atomic_impl::notify_all(_M_ptr); }
1880
1881 // TODO add const volatile overload
1882#endif // __glibcxx_atomic_wait
1883
1884 value_type
1885 fetch_add(value_type __i,
1886 memory_order __m = memory_order_seq_cst) const noexcept
1887 { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1888
1889 value_type
1890 fetch_sub(value_type __i,
1891 memory_order __m = memory_order_seq_cst) const noexcept
1892 { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1893
1894 value_type
1895 operator+=(value_type __i) const noexcept
1896 { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1897
1898 value_type
1899 operator-=(value_type __i) const noexcept
1900 { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1901
1902 private:
1903 _Fp* _M_ptr;
1904 };
1905
1906 // base class for atomic_ref<pointer-type>
1907 template<typename _Tp>
1908 struct __atomic_ref<_Tp*, false, false>
1909 {
1910 public:
1911 using value_type = _Tp*;
1912 using difference_type = ptrdiff_t;
1913
1914 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1915
1916 static constexpr size_t required_alignment = __alignof__(_Tp*);
1917
1918 __atomic_ref() = delete;
1919 __atomic_ref& operator=(const __atomic_ref&) = delete;
1920
1921 explicit
1922 __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1923 {
1924 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1925 }
1926
1927 __atomic_ref(const __atomic_ref&) noexcept = default;
1928
1929 _Tp*
1930 operator=(_Tp* __t) const noexcept
1931 {
1932 this->store(__t);
1933 return __t;
1934 }
1935
1936 operator _Tp*() const noexcept { return this->load(); }
1937
1938 bool
1939 is_lock_free() const noexcept
1940 {
1941 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1942 }
1943
1944 void
1945 store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1946 { __atomic_impl::store(_M_ptr, __t, __m); }
1947
1948 _Tp*
1949 load(memory_order __m = memory_order_seq_cst) const noexcept
1950 { return __atomic_impl::load(_M_ptr, __m); }
1951
1952 _Tp*
1953 exchange(_Tp* __desired,
1954 memory_order __m = memory_order_seq_cst) const noexcept
1955 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1956
1957 bool
1958 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1959 memory_order __success,
1960 memory_order __failure) const noexcept
1961 {
1962 return __atomic_impl::compare_exchange_weak<true>(
1963 _M_ptr, __expected, __desired, __success, __failure);
1964 }
1965
1966 bool
1967 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1968 memory_order __success,
1969 memory_order __failure) const noexcept
1970 {
1971 return __atomic_impl::compare_exchange_strong<true>(
1972 _M_ptr, __expected, __desired, __success, __failure);
1973 }
1974
1975 bool
1976 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1977 memory_order __order = memory_order_seq_cst)
1978 const noexcept
1979 {
1980 return compare_exchange_weak(__expected, __desired, __order,
1981 __cmpexch_failure_order(__order));
1982 }
1983
1984 bool
1985 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1986 memory_order __order = memory_order_seq_cst)
1987 const noexcept
1988 {
1989 return compare_exchange_strong(__expected, __desired, __order,
1990 __cmpexch_failure_order(__order));
1991 }
1992
1993#if __glibcxx_atomic_wait
1994 _GLIBCXX_ALWAYS_INLINE void
1995 wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
1996 { __atomic_impl::wait(_M_ptr, __old, __m); }
1997
1998 // TODO add const volatile overload
1999
2000 _GLIBCXX_ALWAYS_INLINE void
2001 notify_one() const noexcept
2002 { __atomic_impl::notify_one(_M_ptr); }
2003
2004 // TODO add const volatile overload
2005
2006 _GLIBCXX_ALWAYS_INLINE void
2007 notify_all() const noexcept
2008 { __atomic_impl::notify_all(_M_ptr); }
2009
2010 // TODO add const volatile overload
2011#endif // __glibcxx_atomic_wait
2012
2013 _GLIBCXX_ALWAYS_INLINE value_type
2014 fetch_add(difference_type __d,
2015 memory_order __m = memory_order_seq_cst) const noexcept
2016 { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2017
2018 _GLIBCXX_ALWAYS_INLINE value_type
2019 fetch_sub(difference_type __d,
2020 memory_order __m = memory_order_seq_cst) const noexcept
2021 { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2022
2023 value_type
2024 operator++(int) const noexcept
2025 { return fetch_add(1); }
2026
2027 value_type
2028 operator--(int) const noexcept
2029 { return fetch_sub(1); }
2030
2031 value_type
2032 operator++() const noexcept
2033 {
2034 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2035 }
2036
2037 value_type
2038 operator--() const noexcept
2039 {
2040 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2041 }
2042
2043 value_type
2044 operator+=(difference_type __d) const noexcept
2045 {
2046 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2047 }
2048
2049 value_type
2050 operator-=(difference_type __d) const noexcept
2051 {
2052 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2053 }
2054
2055 private:
2056 static constexpr ptrdiff_t
2057 _S_type_size(ptrdiff_t __d) noexcept
2058 {
2059 static_assert(is_object_v<_Tp>);
2060 return __d * sizeof(_Tp);
2061 }
2062
2063 _Tp** _M_ptr;
2064 };
2065#endif // C++2a
2066
2067 /// @endcond
2068
2069 /// @} group atomics
2070
2071_GLIBCXX_END_NAMESPACE_VERSION
2072} // namespace std
2073
2074#endif