#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
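
  // Illustrative note: the helpers above derive the implicit failure
  // ordering used by the single-order compare_exchange overloads.  A
  // successful order of memory_order_acq_rel yields a failure order of
  // memory_order_acquire, and memory_order_release yields
  // memory_order_relaxed; all other orders pass through unchanged, e.g.
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);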
  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  // Fences.
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#ifdef __glibcxx_atomic_flag_test
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif // __glibcxx_atomic_flag_test

#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
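
  // Usage sketch (illustrative user code, not part of the class above):
  // atomic_flag provides the building blocks for a minimal spin lock.
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   { while (__lock.test_and_set(std::memory_order_acquire)) { } }
  //
  //   void unlock()
  //   { __lock.clear(std::memory_order_release); }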
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }
      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_i, __old,
			   [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
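
  // Usage sketch (illustrative): __atomic_base is the common implementation
  // behind std::atomic for integral types, so, for example,
  //
  //   std::atomic<int> __counter{0};
  //   int __prev = __counter.fetch_add(1, std::memory_order_relaxed);
  //   int __now  = __counter.load();   // seq_cst by default
  //
  // maps onto the fetch_add/load members defined above, which in turn expand
  // to the __atomic_fetch_add/__atomic_load_n built-ins.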
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_p, __old,
				     [__m, this]
				     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
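
  // Note (illustrative): the pointer specialization scales all distances by
  // sizeof(_PTp) via _M_type_size, matching ordinary pointer arithmetic.
  // For example:
  //
  //   int __a[4] = {0, 1, 2, 3};
  //   std::atomic<int*> __p{__a};
  //   __p.fetch_add(2);   // __p now points at __a + 2; the underlying
  //                       // __atomic_fetch_add advanced by 2 * sizeof(int)
  //                       // bytes.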
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
	return false;
#elif __has_builtin(__has_unique_object_representations)
	return !__has_unique_object_representations(_Tp)
	  && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
	return true;
#endif
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
	auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
	if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
	  __builtin_clear_padding(__ptr);
#endif
	return __ptr;
      }

    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
			 bool __is_weak,
			 memory_order __s, memory_order __f) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

	using _Vp = _Val<_Tp>;
	_Tp* const __pval = std::__addressof(__val);

	if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
	  {
	    return __atomic_compare_exchange(__pval, std::__addressof(__e),
					     std::__addressof(__i), __is_weak,
					     int(__s), int(__f));
	  }
	else if constexpr (!_AtomicRef) // std::atomic<T>
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);
	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Clear padding of the expected value:
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // For std::atomic<T> the stored value already has zeroed padding,
	    // so the padding-cleared comparand can be used directly.
	    if (__atomic_compare_exchange(__pval, __pexp, __pi,
					  __is_weak, int(__s), int(__f)))
	      return true;

	    // Value bits must be different, copy from __exp back to __e:
	    __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
	    return false;
	  }
	else // std::atomic_ref<T> where T has padding bits.
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);

	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Optimistically assume that a previous store had zeroed padding
	    // so that zeroed padding in the comparand will match.
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // The referenced object might not have zeroed padding, so a
	    // failed comparison is not necessarily a difference in value
	    // bits.  Retry until the exchange succeeds or the value bits
	    // really differ.
	    while (true)
	      {
		// Copy of the comparand, for a value-bits comparison:
		_Vp __orig = __exp;

		if (__atomic_compare_exchange(__pval, __pexp, __pi,
					      __is_weak, int(__s), int(__f)))
		  return true;

		// Value currently stored in the atomic object:
		_Vp __curr = __exp;

		if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
				     __atomic_impl::__clear_padding(__curr),
				     sizeof(_Vp)))
		  {
		    // Value bits must be different, copy back to __e:
		    __builtin_memcpy(std::__addressof(__e), __pexp,
				     sizeof(_Vp));
		    return false;
		  }
		// Only the padding bits differed; retry with the padding
		// bits that are actually stored in the object.
	      }
	  }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
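
  // Background sketch (illustrative): __compare_exchange takes the
  // padding-clearing path above because the compare-and-swap built-ins
  // compare whole object representations.  A type such as
  //
  //   struct _Padded { char __c; /* 3 bytes of padding */ int __i; };
  //
  // can compare unequal at the byte level even when __c and __i match, so
  // the padding bits of the comparand (and, for atomic_ref, of the stored
  // object) have to be normalised before a byte-wise comparison means
  // anything.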
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
			  __dest, int(__m));
	return *__dest;
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
		 *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
		 *__ptr, __expected, __desired, false, __success, __failure);
      }

#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
	   memory_order __m = memory_order_seq_cst) noexcept
      {
	std::__atomic_wait_address_v(__ptr, __old,
	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __newval;
      }
  } // namespace __atomic_impl
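
  // Note (illustrative): the __fetch_*_flt helpers above implement
  // read-modify-write for floating-point types as a compare-exchange loop,
  // since there is no native __atomic_fetch_add for float/double.  The same
  // pattern works for any user-defined update, e.g.
  //
  //   template<typename _Tp, typename _Fn>
  //     _Tp
  //     __update(std::atomic<_Tp>& __a, _Fn __f)
  //     {
  //       _Tp __old = __a.load(std::memory_order_relaxed);
  //       while (!__a.compare_exchange_weak(__old, __f(__old)))
  //         { /* __old has been refreshed with the current value */ }
  //       return __old;
  //     }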
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { __atomic_impl::__clear_padding(_M_fp); }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
  template<typename _Tp,
	   bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
	   bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
	? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

    private:
      _Tp* _M_ptr;
    };
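
  // Usage sketch (illustrative): __atomic_ref is the implementation detail
  // behind std::atomic_ref, which applies atomic operations to an existing,
  // suitably aligned object without changing its type:
  //
  //   alignas(std::atomic_ref<int>::required_alignment) int __value = 0;
  //   std::atomic_ref<int> __ref(__value);
  //   __ref.fetch_add(1, std::memory_order_relaxed);
  //   // __value may be accessed non-atomically again once no atomic_ref
  //   // referring to it is being used concurrently.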
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free
	= ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
		 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	static_assert(is_object_v<_Tp>);
	return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };

#endif // C++2a

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H