// Header guard and compile-environment gating for this internal libstdc++
// atomics header.
// NOTE(review): this extraction has dropped many intervening lines (the
// embedded original line numbers jump, e.g. 31 -> 34 -> 42); compare against
// the upstream header before treating any span here as complete.
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
34#pragma GCC system_header
// Extra includes/declarations are pulled in only for hosted C++20 builds.
42#if __cplusplus > 201703L && _GLIBCXX_HOSTED
// The thin wrappers below must compile down to the raw builtin call, so they
// are forced inline.
46#ifndef _GLIBCXX_ALWAYS_INLINE
47#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
52namespace std _GLIBCXX_VISIBILITY(default)
54_GLIBCXX_BEGIN_NAMESPACE_VERSION
64#if __cplusplus > 201703L
// C++20: memory_order is a scoped enumeration; these inline constexpr
// variables keep the traditional unscoped names usable.
75 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
76 inline constexpr memory_order memory_order_consume = memory_order::consume;
77 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
78 inline constexpr memory_order memory_order_release = memory_order::release;
79 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
80 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
// Modifier bits OR-ed into a memory_order value: the low 16 bits hold the
// standard order, the high 16 bits hold extensions (hardware lock elision).
// NOTE(review): the enum's braces are missing from this extraction.
94 enum __memory_order_modifier
96 __memory_order_mask = 0x0ffff,
97 __memory_order_modifier_mask = 0xffff0000,
98 __memory_order_hle_acquire = 0x10000,
99 __memory_order_hle_release = 0x20000
// Map a compare-exchange success order to the strongest order permitted for
// failure: acq_rel -> acquire, release -> relaxed, everything else unchanged.
// NOTE(review): the enclosing function signature line is missing here.
121 return __m == memory_order_acq_rel ? memory_order_acquire
122 : __m == memory_order_release ? memory_order_relaxed : __m;
// Same mapping, but preserving any modifier bits carried in the high half.
128 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
129 | __memory_order_modifier(__m & __memory_order_modifier_mask))
// A failure order may not be release or acq_rel ([atomics.types.operations]).
133 __is_valid_cmpexch_failure_order(
memory_order __m)
noexcept
135 return (__m & __memory_order_mask) != memory_order_release
136 && (__m & __memory_order_mask) != memory_order_acq_rel;
// Forward declaration of the generic atomic base template, specialized below
// for integral and pointer types.
140 template<
typename _IntTp>
141 struct __atomic_base;
// Thread/signal fences forward directly to the compiler builtins.
// NOTE(review): the atomic_thread_fence/atomic_signal_fence signature lines
// appear to be missing from this extraction.
145 _GLIBCXX_ALWAYS_INLINE
void
147 { __atomic_thread_fence(
int(__m)); }
149 _GLIBCXX_ALWAYS_INLINE
void
151 { __atomic_signal_fence(
int(__m)); }
154 template<
typename _Tp>
// _GLIBCXX20_INIT(I) expands to a default member initializer when atomics
// are value-initialized (P0883), and to nothing otherwise.
163#if __glibcxx_atomic_value_initialization
164# define _GLIBCXX20_INIT(I) = I
166# define _GLIBCXX20_INIT(I)
// Legacy C-compatible initializer macro (deprecated in C++20).
170#define ATOMIC_VAR_INIT(_VI) { _VI }
172 template<
typename _Tp>
175 template<
typename _Tp>
// The flag's storage type depends on what value the target's test-and-set
// instruction actually writes: bool only if that value is 1.
179#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
180 typedef bool __atomic_flag_data_type;
182 typedef unsigned char __atomic_flag_data_type;
// Base so that atomic_flag stays a C-compatible aggregate.
197 _GLIBCXX_BEGIN_EXTERN_C
199 struct __atomic_flag_base
201 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
204 _GLIBCXX_END_EXTERN_C
208#define ATOMIC_FLAG_INIT { 0 }
// std::atomic_flag: the one guaranteed lock-free atomic type. Non-copyable;
// all operations forward to the __atomic_* builtins on the inherited _M_i.
// NOTE(review): several lines (braces, some signatures) are missing from this
// extraction; the embedded original numbering jumps.
211 struct atomic_flag :
public __atomic_flag_base
213 atomic_flag()
noexcept =
default;
214 ~atomic_flag()
noexcept =
default;
215 atomic_flag(
const atomic_flag&) =
delete;
216 atomic_flag& operator=(
const atomic_flag&) =
delete;
217 atomic_flag& operator=(
const atomic_flag&)
volatile =
delete;
// Conversion constructor maps bool to the target's test-and-set true value.
220 constexpr atomic_flag(
bool __i) noexcept
221 : __atomic_flag_base{ _S_init(__i) }
// Atomically set the flag; returns whether it was already set.
224 _GLIBCXX_ALWAYS_INLINE
bool
225 test_and_set(
memory_order __m = memory_order_seq_cst)
noexcept
227 return __atomic_test_and_set (&_M_i,
int(__m));
230 _GLIBCXX_ALWAYS_INLINE
bool
231 test_and_set(
memory_order __m = memory_order_seq_cst)
volatile noexcept
233 return __atomic_test_and_set (&_M_i,
int(__m));
// C++20 atomic_flag::test — read without modifying.
236#ifdef __glibcxx_atomic_flag_test
237 _GLIBCXX_ALWAYS_INLINE
bool
238 test(
memory_order __m = memory_order_seq_cst)
const noexcept
240 __atomic_flag_data_type __v;
241 __atomic_load(&_M_i, &__v,
int(__m));
242 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
245 _GLIBCXX_ALWAYS_INLINE
bool
246 test(
memory_order __m = memory_order_seq_cst)
const volatile noexcept
248 __atomic_flag_data_type __v;
249 __atomic_load(&_M_i, &__v,
int(__m));
250 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
// C++20 wait/notify: block while the flag's observed value equals __old.
// NOTE(review): the wait() signature line is missing from this extraction.
254#if __glibcxx_atomic_wait
255 _GLIBCXX_ALWAYS_INLINE
void
259 const __atomic_flag_data_type __v
260 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
262 std::__atomic_wait_address_v(&_M_i, __v,
263 [__m,
this] {
return __atomic_load_n(&_M_i,
int(__m)); });
268 _GLIBCXX_ALWAYS_INLINE
void
269 notify_one()
noexcept
270 { std::__atomic_notify_address(&_M_i,
false); }
274 _GLIBCXX_ALWAYS_INLINE
void
275 notify_all()
noexcept
276 { std::__atomic_notify_address(&_M_i,
true); }
// clear(): a clear may not use an acquire-flavored (or consume) order.
// NOTE(review): the non-volatile clear() signature line is missing here.
281 _GLIBCXX_ALWAYS_INLINE
void
285 = __m & __memory_order_mask;
286 __glibcxx_assert(__b != memory_order_consume);
287 __glibcxx_assert(__b != memory_order_acquire);
288 __glibcxx_assert(__b != memory_order_acq_rel);
290 __atomic_clear (&_M_i,
int(__m));
293 _GLIBCXX_ALWAYS_INLINE
void
294 clear(
memory_order __m = memory_order_seq_cst)
volatile noexcept
297 = __m & __memory_order_mask;
298 __glibcxx_assert(__b != memory_order_consume);
299 __glibcxx_assert(__b != memory_order_acquire);
300 __glibcxx_assert(__b != memory_order_acq_rel);
302 __atomic_clear (&_M_i,
int(__m));
// Translate bool to the stored representation at constexpr time.
// NOTE(review): the _S_init(bool) name line is missing from this extraction.
306 static constexpr __atomic_flag_data_type
308 {
return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
// __atomic_base<_ITp>: the integral atomic implementation underlying
// std::atomic<integral>. Storage is over-aligned to max(sizeof, alignof) so
// the builtins can operate lock-free where the target allows.
// NOTE(review): the 'struct __atomic_base' head line and many braces are
// missing from this extraction; the embedded original numbering jumps.
337 template<
typename _ITp>
344 typedef _ITp __int_type;
346 static constexpr int _S_alignment =
347 sizeof(_ITp) >
alignof(_ITp) ?
sizeof(_ITp) : alignof(_ITp);
349 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
// Non-copyable; default-constructible (value-initialized in C++20 builds).
352 __atomic_base() noexcept = default;
353 ~__atomic_base() noexcept = default;
354 __atomic_base(const __atomic_base&) = delete;
355 __atomic_base& operator=(const __atomic_base&) = delete;
356 __atomic_base& operator=(const __atomic_base&) volatile = delete;
358 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
// Implicit conversion and assignment use seq_cst via load/store/fetch ops.
360 operator __int_type() const noexcept
363 operator __int_type() const volatile noexcept
367 operator=(__int_type __i)
noexcept
374 operator=(__int_type __i)
volatile noexcept
// Post-increment/decrement return the old value (fetch_add/fetch_sub).
381 operator++(
int)
noexcept
382 {
return fetch_add(1); }
385 operator++(
int)
volatile noexcept
386 {
return fetch_add(1); }
389 operator--(
int)
noexcept
390 {
return fetch_sub(1); }
393 operator--(
int)
volatile noexcept
394 {
return fetch_sub(1); }
// Pre-increment/decrement return the new value, always seq_cst.
397 operator++() noexcept
398 {
return __atomic_add_fetch(&_M_i, 1,
int(memory_order_seq_cst)); }
401 operator++() volatile noexcept
402 {
return __atomic_add_fetch(&_M_i, 1,
int(memory_order_seq_cst)); }
405 operator--() noexcept
406 {
return __atomic_sub_fetch(&_M_i, 1,
int(memory_order_seq_cst)); }
409 operator--() volatile noexcept
410 {
return __atomic_sub_fetch(&_M_i, 1,
int(memory_order_seq_cst)); }
// Compound assignment: seq_cst read-modify-write returning the new value.
413 operator+=(__int_type __i)
noexcept
414 {
return __atomic_add_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
417 operator+=(__int_type __i)
volatile noexcept
418 {
return __atomic_add_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
421 operator-=(__int_type __i)
noexcept
422 {
return __atomic_sub_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
425 operator-=(__int_type __i)
volatile noexcept
426 {
return __atomic_sub_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
429 operator&=(__int_type __i)
noexcept
430 {
return __atomic_and_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
433 operator&=(__int_type __i)
volatile noexcept
434 {
return __atomic_and_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
437 operator|=(__int_type __i)
noexcept
438 {
return __atomic_or_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
441 operator|=(__int_type __i)
volatile noexcept
442 {
return __atomic_or_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
445 operator^=(__int_type __i)
noexcept
446 {
return __atomic_xor_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
449 operator^=(__int_type __i)
volatile noexcept
450 {
return __atomic_xor_fetch(&_M_i, __i,
int(memory_order_seq_cst)); }
// The fake pointer (-_S_alignment) encodes only the alignment for the
// builtin's lock-free query; no object is dereferenced.
453 is_lock_free() const noexcept
456 return __atomic_is_lock_free(
sizeof(_M_i),
457 reinterpret_cast<void *
>(-_S_alignment));
461 is_lock_free() const volatile noexcept
464 return __atomic_is_lock_free(
sizeof(_M_i),
465 reinterpret_cast<void *
>(-_S_alignment));
// store(): order may not be acquire-flavored or consume.
468 _GLIBCXX_ALWAYS_INLINE
void
469 store(__int_type __i, memory_order __m = memory_order_seq_cst)
noexcept
472 = __m & __memory_order_mask;
473 __glibcxx_assert(__b != memory_order_acquire);
474 __glibcxx_assert(__b != memory_order_acq_rel);
475 __glibcxx_assert(__b != memory_order_consume);
477 __atomic_store_n(&_M_i, __i,
int(__m));
480 _GLIBCXX_ALWAYS_INLINE
void
481 store(__int_type __i,
482 memory_order __m = memory_order_seq_cst)
volatile noexcept
485 = __m & __memory_order_mask;
486 __glibcxx_assert(__b != memory_order_acquire);
487 __glibcxx_assert(__b != memory_order_acq_rel);
488 __glibcxx_assert(__b != memory_order_consume);
490 __atomic_store_n(&_M_i, __i,
int(__m));
// load(): order may not be release or acq_rel.
493 _GLIBCXX_ALWAYS_INLINE __int_type
494 load(memory_order __m = memory_order_seq_cst)
const noexcept
497 = __m & __memory_order_mask;
498 __glibcxx_assert(__b != memory_order_release);
499 __glibcxx_assert(__b != memory_order_acq_rel);
501 return __atomic_load_n(&_M_i,
int(__m));
504 _GLIBCXX_ALWAYS_INLINE __int_type
505 load(memory_order __m = memory_order_seq_cst)
const volatile noexcept
508 = __m & __memory_order_mask;
509 __glibcxx_assert(__b != memory_order_release);
510 __glibcxx_assert(__b != memory_order_acq_rel);
512 return __atomic_load_n(&_M_i,
int(__m));
515 _GLIBCXX_ALWAYS_INLINE __int_type
516 exchange(__int_type __i,
517 memory_order __m = memory_order_seq_cst)
noexcept
519 return __atomic_exchange_n(&_M_i, __i,
int(__m));
523 _GLIBCXX_ALWAYS_INLINE __int_type
524 exchange(__int_type __i,
525 memory_order __m = memory_order_seq_cst)
volatile noexcept
527 return __atomic_exchange_n(&_M_i, __i,
int(__m));
// compare_exchange_{weak,strong}: 4th builtin argument selects weak (1) vs
// strong (0); failure order is validated against the rule checked by
// __is_valid_cmpexch_failure_order.
530 _GLIBCXX_ALWAYS_INLINE
bool
531 compare_exchange_weak(__int_type& __i1, __int_type __i2,
532 memory_order __m1, memory_order __m2)
noexcept
534 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
536 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
537 int(__m1),
int(__m2));
540 _GLIBCXX_ALWAYS_INLINE
bool
541 compare_exchange_weak(__int_type& __i1, __int_type __i2,
543 memory_order __m2)
volatile noexcept
545 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
547 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
548 int(__m1),
int(__m2));
// Single-order overloads derive the failure order from the success order.
551 _GLIBCXX_ALWAYS_INLINE
bool
552 compare_exchange_weak(__int_type& __i1, __int_type __i2,
553 memory_order __m = memory_order_seq_cst)
noexcept
555 return compare_exchange_weak(__i1, __i2, __m,
556 __cmpexch_failure_order(__m));
559 _GLIBCXX_ALWAYS_INLINE
bool
560 compare_exchange_weak(__int_type& __i1, __int_type __i2,
561 memory_order __m = memory_order_seq_cst)
volatile noexcept
563 return compare_exchange_weak(__i1, __i2, __m,
564 __cmpexch_failure_order(__m));
567 _GLIBCXX_ALWAYS_INLINE
bool
568 compare_exchange_strong(__int_type& __i1, __int_type __i2,
569 memory_order __m1, memory_order __m2)
noexcept
571 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
573 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
574 int(__m1),
int(__m2));
577 _GLIBCXX_ALWAYS_INLINE
bool
578 compare_exchange_strong(__int_type& __i1, __int_type __i2,
580 memory_order __m2)
volatile noexcept
582 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
584 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
585 int(__m1),
int(__m2));
588 _GLIBCXX_ALWAYS_INLINE
bool
589 compare_exchange_strong(__int_type& __i1, __int_type __i2,
590 memory_order __m = memory_order_seq_cst)
noexcept
592 return compare_exchange_strong(__i1, __i2, __m,
593 __cmpexch_failure_order(__m));
596 _GLIBCXX_ALWAYS_INLINE
bool
597 compare_exchange_strong(__int_type& __i1, __int_type __i2,
598 memory_order __m = memory_order_seq_cst)
volatile noexcept
600 return compare_exchange_strong(__i1, __i2, __m,
601 __cmpexch_failure_order(__m));
// C++20 wait/notify, available only when the library supports atomic waiting.
604#if __glibcxx_atomic_wait
605 _GLIBCXX_ALWAYS_INLINE
void
606 wait(__int_type __old,
607 memory_order __m = memory_order_seq_cst)
const noexcept
609 std::__atomic_wait_address_v(&_M_i, __old,
610 [__m,
this] {
return this->load(__m); });
615 _GLIBCXX_ALWAYS_INLINE
void
616 notify_one() noexcept
617 { std::__atomic_notify_address(&_M_i,
false); }
621 _GLIBCXX_ALWAYS_INLINE
void
622 notify_all() noexcept
623 { std::__atomic_notify_address(&_M_i,
true); }
// fetch_* return the value held before the operation.
628 _GLIBCXX_ALWAYS_INLINE __int_type
629 fetch_add(__int_type __i,
630 memory_order __m = memory_order_seq_cst)
noexcept
631 {
return __atomic_fetch_add(&_M_i, __i,
int(__m)); }
633 _GLIBCXX_ALWAYS_INLINE __int_type
634 fetch_add(__int_type __i,
635 memory_order __m = memory_order_seq_cst)
volatile noexcept
636 {
return __atomic_fetch_add(&_M_i, __i,
int(__m)); }
638 _GLIBCXX_ALWAYS_INLINE __int_type
639 fetch_sub(__int_type __i,
640 memory_order __m = memory_order_seq_cst)
noexcept
641 {
return __atomic_fetch_sub(&_M_i, __i,
int(__m)); }
643 _GLIBCXX_ALWAYS_INLINE __int_type
644 fetch_sub(__int_type __i,
645 memory_order __m = memory_order_seq_cst)
volatile noexcept
646 {
return __atomic_fetch_sub(&_M_i, __i,
int(__m)); }
648 _GLIBCXX_ALWAYS_INLINE __int_type
649 fetch_and(__int_type __i,
650 memory_order __m = memory_order_seq_cst)
noexcept
651 {
return __atomic_fetch_and(&_M_i, __i,
int(__m)); }
653 _GLIBCXX_ALWAYS_INLINE __int_type
654 fetch_and(__int_type __i,
655 memory_order __m = memory_order_seq_cst)
volatile noexcept
656 {
return __atomic_fetch_and(&_M_i, __i,
int(__m)); }
658 _GLIBCXX_ALWAYS_INLINE __int_type
659 fetch_or(__int_type __i,
660 memory_order __m = memory_order_seq_cst)
noexcept
661 {
return __atomic_fetch_or(&_M_i, __i,
int(__m)); }
663 _GLIBCXX_ALWAYS_INLINE __int_type
664 fetch_or(__int_type __i,
665 memory_order __m = memory_order_seq_cst)
volatile noexcept
666 {
return __atomic_fetch_or(&_M_i, __i,
int(__m)); }
668 _GLIBCXX_ALWAYS_INLINE __int_type
669 fetch_xor(__int_type __i,
670 memory_order __m = memory_order_seq_cst)
noexcept
671 {
return __atomic_fetch_xor(&_M_i, __i,
int(__m)); }
673 _GLIBCXX_ALWAYS_INLINE __int_type
674 fetch_xor(__int_type __i,
675 memory_order __m = memory_order_seq_cst)
volatile noexcept
676 {
return __atomic_fetch_xor(&_M_i, __i,
int(__m)); }
// Partial specialization for pointers: arithmetic is scaled by sizeof(_PTp)
// via _S_type_size so fetch_add(1) advances by one element.
// NOTE(review): many braces/signature fragments are missing from this
// extraction; the embedded original numbering jumps.
681 template<
typename _PTp>
682 struct __atomic_base<_PTp*>
685 typedef _PTp* __pointer_type;
687 __pointer_type _M_p _GLIBCXX20_INIT(
nullptr);
// Scale an element count to a byte offset for the builtin arithmetic.
689 static constexpr ptrdiff_t
690 _S_type_size(ptrdiff_t __d)
691 {
return __d *
sizeof(_PTp); }
694 __atomic_base() noexcept = default;
695 ~__atomic_base() noexcept = default;
696 __atomic_base(const __atomic_base&) = delete;
697 __atomic_base& operator=(const __atomic_base&) = delete;
698 __atomic_base& operator=(const __atomic_base&) volatile = delete;
701 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
703 operator __pointer_type() const noexcept
706 operator __pointer_type() const volatile noexcept
710 operator=(__pointer_type __p)
noexcept
717 operator=(__pointer_type __p)
volatile noexcept
// Post-inc/dec return the old pointer.
724 operator++(
int)
noexcept
725 {
return fetch_add(1); }
728 operator++(
int)
volatile noexcept
729 {
return fetch_add(1); }
732 operator--(
int)
noexcept
733 {
return fetch_sub(1); }
736 operator--(
int)
volatile noexcept
737 {
return fetch_sub(1); }
// Pre-inc/dec and compound assignment return the new pointer, seq_cst.
740 operator++() noexcept
741 {
return __atomic_add_fetch(&_M_p, _S_type_size(1),
742 int(memory_order_seq_cst)); }
745 operator++() volatile noexcept
746 {
return __atomic_add_fetch(&_M_p, _S_type_size(1),
747 int(memory_order_seq_cst)); }
750 operator--() noexcept
751 {
return __atomic_sub_fetch(&_M_p, _S_type_size(1),
752 int(memory_order_seq_cst)); }
755 operator--() volatile noexcept
756 {
return __atomic_sub_fetch(&_M_p, _S_type_size(1),
757 int(memory_order_seq_cst)); }
760 operator+=(ptrdiff_t __d)
noexcept
761 {
return __atomic_add_fetch(&_M_p, _S_type_size(__d),
762 int(memory_order_seq_cst)); }
765 operator+=(ptrdiff_t __d)
volatile noexcept
766 {
return __atomic_add_fetch(&_M_p, _S_type_size(__d),
767 int(memory_order_seq_cst)); }
770 operator-=(ptrdiff_t __d)
noexcept
771 {
return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
772 int(memory_order_seq_cst)); }
775 operator-=(ptrdiff_t __d)
volatile noexcept
776 {
return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
777 int(memory_order_seq_cst)); }
// Fake pointer (-__alignof(_M_p)) conveys only alignment to the builtin.
780 is_lock_free() const noexcept
783 return __atomic_is_lock_free(
sizeof(_M_p),
784 reinterpret_cast<void *
>(-__alignof(_M_p)));
788 is_lock_free() const volatile noexcept
791 return __atomic_is_lock_free(
sizeof(_M_p),
792 reinterpret_cast<void *
>(-__alignof(_M_p)));
// store(): order may not be acquire-flavored or consume.
795 _GLIBCXX_ALWAYS_INLINE
void
796 store(__pointer_type __p,
800 = __m & __memory_order_mask;
802 __glibcxx_assert(__b != memory_order_acquire);
803 __glibcxx_assert(__b != memory_order_acq_rel);
804 __glibcxx_assert(__b != memory_order_consume);
806 __atomic_store_n(&_M_p, __p,
int(__m));
809 _GLIBCXX_ALWAYS_INLINE
void
810 store(__pointer_type __p,
811 memory_order __m = memory_order_seq_cst)
volatile noexcept
814 = __m & __memory_order_mask;
815 __glibcxx_assert(__b != memory_order_acquire);
816 __glibcxx_assert(__b != memory_order_acq_rel);
817 __glibcxx_assert(__b != memory_order_consume);
819 __atomic_store_n(&_M_p, __p,
int(__m));
// load(): order may not be release or acq_rel.
822 _GLIBCXX_ALWAYS_INLINE __pointer_type
823 load(
memory_order __m = memory_order_seq_cst)
const noexcept
826 = __m & __memory_order_mask;
827 __glibcxx_assert(__b != memory_order_release);
828 __glibcxx_assert(__b != memory_order_acq_rel);
830 return __atomic_load_n(&_M_p,
int(__m));
833 _GLIBCXX_ALWAYS_INLINE __pointer_type
834 load(
memory_order __m = memory_order_seq_cst)
const volatile noexcept
837 = __m & __memory_order_mask;
838 __glibcxx_assert(__b != memory_order_release);
839 __glibcxx_assert(__b != memory_order_acq_rel);
841 return __atomic_load_n(&_M_p,
int(__m));
844 _GLIBCXX_ALWAYS_INLINE __pointer_type
845 exchange(__pointer_type __p,
848 return __atomic_exchange_n(&_M_p, __p,
int(__m));
852 _GLIBCXX_ALWAYS_INLINE __pointer_type
853 exchange(__pointer_type __p,
854 memory_order __m = memory_order_seq_cst)
volatile noexcept
856 return __atomic_exchange_n(&_M_p, __p,
int(__m));
// CAS: 4th builtin argument 1 = weak, 0 = strong; failure order validated.
859 _GLIBCXX_ALWAYS_INLINE
bool
860 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
864 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
866 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
867 int(__m1),
int(__m2));
870 _GLIBCXX_ALWAYS_INLINE
bool
871 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
875 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
877 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
878 int(__m1),
int(__m2));
881 _GLIBCXX_ALWAYS_INLINE
bool
882 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
886 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
888 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
889 int(__m1),
int(__m2));
892 _GLIBCXX_ALWAYS_INLINE
bool
893 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
897 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
899 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
900 int(__m1),
int(__m2));
// C++20 wait/notify support.
903#if __glibcxx_atomic_wait
904 _GLIBCXX_ALWAYS_INLINE
void
905 wait(__pointer_type __old,
908 std::__atomic_wait_address_v(&_M_p, __old,
910 {
return this->load(__m); });
915 _GLIBCXX_ALWAYS_INLINE
void
916 notify_one() const noexcept
917 { std::__atomic_notify_address(&_M_p,
false); }
921 _GLIBCXX_ALWAYS_INLINE
void
922 notify_all() const noexcept
923 { std::__atomic_notify_address(&_M_p,
true); }
// fetch_add/fetch_sub take element counts; _S_type_size scales to bytes.
928 _GLIBCXX_ALWAYS_INLINE __pointer_type
929 fetch_add(ptrdiff_t __d,
931 {
return __atomic_fetch_add(&_M_p, _S_type_size(__d),
int(__m)); }
933 _GLIBCXX_ALWAYS_INLINE __pointer_type
934 fetch_add(ptrdiff_t __d,
935 memory_order __m = memory_order_seq_cst)
volatile noexcept
936 {
return __atomic_fetch_add(&_M_p, _S_type_size(__d),
int(__m)); }
938 _GLIBCXX_ALWAYS_INLINE __pointer_type
939 fetch_sub(ptrdiff_t __d,
941 {
return __atomic_fetch_sub(&_M_p, _S_type_size(__d),
int(__m)); }
943 _GLIBCXX_ALWAYS_INLINE __pointer_type
944 fetch_sub(ptrdiff_t __d,
945 memory_order __m = memory_order_seq_cst)
volatile noexcept
946 {
return __atomic_fetch_sub(&_M_p, _S_type_size(__d),
int(__m)); }
// Implementation helpers shared by atomic<T>, atomic_ref and the float
// specializations. Padding bits must be cleared before byte-wise CAS so that
// value-equal objects compare equal.
// NOTE(review): large parts of the three compare-exchange branches (local
// variable declarations, loop structure) are missing from this extraction.
949 namespace __atomic_impl
// True when _Tp may contain padding bits that __builtin_clear_padding can
// normalize; float/double are explicitly excluded.
953 template<
typename _Tp>
955 __maybe_has_padding()
957#if ! __has_builtin(__builtin_clear_padding)
959#elif __has_builtin(__has_unique_object_representations)
960 return !__has_unique_object_representations(_Tp)
961 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
967#pragma GCC diagnostic push
968#pragma GCC diagnostic ignored "-Wc++17-extensions"
// Zero any padding bits of __val in place; returns its address.
970 template<
typename _Tp>
971 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
972 __clear_padding(_Tp& __val)
noexcept
975#if __has_builtin(__builtin_clear_padding)
976 if constexpr (__atomic_impl::__maybe_has_padding<_Tp>())
977 __builtin_clear_padding(__ptr);
// Strip volatile so values can be copied/compared as plain objects.
983 template<
typename _Tp>
984 using _Val =
typename remove_volatile<_Tp>::type;
// Generic CAS used by both atomic<T> and atomic_ref<T> (_AtomicRef selects
// the variant that must not write into the user's __e on padding mismatch).
986 template<
bool _AtomicRef = false,
typename _Tp>
987 _GLIBCXX_ALWAYS_INLINE
bool
988 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
990 memory_order __s, memory_order __f)
noexcept
992 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
994 using _Vp = _Val<_Tp>;
// Fast path: no padding, the builtin alone is sufficient.
997 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
1001 int(__s),
int(__f));
// atomic<T> path: clear padding of desired and expected, then CAS.
1003 else if constexpr (!_AtomicRef)
1006 _Vp*
const __pi = __atomic_impl::__clear_padding(__i);
1010 _Vp*
const __pexp = __atomic_impl::__clear_padding(__exp);
1014 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1015 __is_weak,
int(__s),
int(__f)))
// atomic_ref path: retry when only padding differed, comparing the
// padding-cleared copies to decide whether the mismatch was real.
1024 _Vp*
const __pi = __atomic_impl::__clear_padding(__i);
1030 _Vp*
const __pexp = __atomic_impl::__clear_padding(__exp);
1047 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1048 __is_weak,
int(__s),
int(__f)))
1055 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1056 __atomic_impl::__clear_padding(__curr),
1067#pragma GCC diagnostic pop
// C++20-only free-function layer used by atomic_ref and atomic<float>:
// wrappers over the __atomic_* builtins plus CAS-loop fallbacks for
// floating-point arithmetic.
// NOTE(review): this extraction drops many lines (braces, some signatures,
// return statements); compare against the upstream header.
1070#if __cplusplus > 201703L
1072 namespace __atomic_impl
// Difference type: ptrdiff_t for pointers, the value type otherwise.
1075 template<
typename _Tp>
1076 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
// As in __atomic_base: the (-_Align) fake pointer only conveys alignment.
1078 template<
size_t _Size,
size_t _Align>
1079 _GLIBCXX_ALWAYS_INLINE
bool
1080 is_lock_free() noexcept
1083 return __atomic_is_lock_free(_Size,
reinterpret_cast<void *
>(-_Align));
// store: padding of the incoming value is cleared before writing.
1086 template<
typename _Tp>
1087 _GLIBCXX_ALWAYS_INLINE
void
1088 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m)
noexcept
1090 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t),
int(__m));
// load: read into a suitably aligned raw buffer, then return as _Val<_Tp>.
1093 template<
typename _Tp>
1094 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1095 load(
const _Tp* __ptr, memory_order __m)
noexcept
1097 alignas(_Tp)
unsigned char __buf[
sizeof(_Tp)];
1098 auto* __dest =
reinterpret_cast<_Val<_Tp>*
>(__buf);
1099 __atomic_load(__ptr, __dest,
int(__m));
1103 template<
typename _Tp>
1104 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1105 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m)
noexcept
1107 alignas(_Tp)
unsigned char __buf[
sizeof(_Tp)];
1108 auto* __dest =
reinterpret_cast<_Val<_Tp>*
>(__buf);
1109 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
// weak/strong CAS delegate to the shared padding-aware __compare_exchange.
1114 template<
bool _AtomicRef = false,
typename _Tp>
1115 _GLIBCXX_ALWAYS_INLINE
bool
1116 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1117 _Val<_Tp> __desired, memory_order __success,
1118 memory_order __failure)
noexcept
1120 return __atomic_impl::__compare_exchange<_AtomicRef>(
1121 *__ptr, __expected, __desired,
true, __success, __failure);
1124 template<
bool _AtomicRef = false,
typename _Tp>
1125 _GLIBCXX_ALWAYS_INLINE
bool
1126 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1127 _Val<_Tp> __desired, memory_order __success,
1128 memory_order __failure)
noexcept
1130 return __atomic_impl::__compare_exchange<_AtomicRef>(
1131 *__ptr, __expected, __desired,
false, __success, __failure);
// C++20 wait/notify wrappers over the libstdc++ wait-address machinery.
1134#if __glibcxx_atomic_wait
1135 template<
typename _Tp>
1136 _GLIBCXX_ALWAYS_INLINE
void
1137 wait(
const _Tp* __ptr, _Val<_Tp> __old,
1138 memory_order __m = memory_order_seq_cst)
noexcept
1140 std::__atomic_wait_address_v(__ptr, __old,
1141 [__ptr, __m]() {
return __atomic_impl::load(__ptr, __m); });
1146 template<
typename _Tp>
1147 _GLIBCXX_ALWAYS_INLINE
void
1148 notify_one(
const _Tp* __ptr)
noexcept
1149 { std::__atomic_notify_address(__ptr,
false); }
1153 template<
typename _Tp>
1154 _GLIBCXX_ALWAYS_INLINE
void
1155 notify_all(
const _Tp* __ptr)
noexcept
1156 { std::__atomic_notify_address(__ptr,
true); }
// fetch_* wrappers returning the old value.
1161 template<
typename _Tp>
1162 _GLIBCXX_ALWAYS_INLINE _Tp
1163 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m)
noexcept
1164 {
return __atomic_fetch_add(__ptr, __i,
int(__m)); }
1166 template<
typename _Tp>
1167 _GLIBCXX_ALWAYS_INLINE _Tp
1168 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m)
noexcept
1169 {
return __atomic_fetch_sub(__ptr, __i,
int(__m)); }
1171 template<
typename _Tp>
1172 _GLIBCXX_ALWAYS_INLINE _Tp
1173 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m)
noexcept
1174 {
return __atomic_fetch_and(__ptr, __i,
int(__m)); }
1176 template<
typename _Tp>
1177 _GLIBCXX_ALWAYS_INLINE _Tp
1178 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m)
noexcept
1179 {
return __atomic_fetch_or(__ptr, __i,
int(__m)); }
1181 template<
typename _Tp>
1182 _GLIBCXX_ALWAYS_INLINE _Tp
1183 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m)
noexcept
1184 {
return __atomic_fetch_xor(__ptr, __i,
int(__m)); }
// __*_fetch variants return the new value; always seq_cst.
1186 template<
typename _Tp>
1187 _GLIBCXX_ALWAYS_INLINE _Tp
1188 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i)
noexcept
1189 {
return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1191 template<
typename _Tp>
1192 _GLIBCXX_ALWAYS_INLINE _Tp
1193 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i)
noexcept
1194 {
return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1196 template<
typename _Tp>
1197 _GLIBCXX_ALWAYS_INLINE _Tp
1198 __and_fetch(_Tp* __ptr, _Val<_Tp> __i)
noexcept
1199 {
return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1201 template<
typename _Tp>
1202 _GLIBCXX_ALWAYS_INLINE _Tp
1203 __or_fetch(_Tp* __ptr, _Val<_Tp> __i)
noexcept
1204 {
return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1206 template<
typename _Tp>
1207 _GLIBCXX_ALWAYS_INLINE _Tp
1208 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i)
noexcept
1209 {
return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
// Floating-point arithmetic: use the builtin when the target supports it
// directly, otherwise fall back to a compare_exchange_weak retry loop.
1211 template<
typename _Tp>
1212 concept __atomic_fetch_addable
1213 =
requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
1215 template<
typename _Tp>
1217 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m)
noexcept
1219 if constexpr (__atomic_fetch_addable<_Tp>)
1220 return __atomic_fetch_add(__ptr, __i,
int(__m));
// CAS loop: compare_exchange_weak refreshes __oldval on failure.
1223 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1224 _Val<_Tp> __newval = __oldval + __i;
1225 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1226 memory_order_relaxed))
1227 __newval = __oldval + __i;
1232 template<
typename _Tp>
1233 concept __atomic_fetch_subtractable
1234 =
requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
1236 template<
typename _Tp>
1238 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m)
noexcept
1240 if constexpr (__atomic_fetch_subtractable<_Tp>)
1241 return __atomic_fetch_sub(__ptr, __i,
int(__m));
1244 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1245 _Val<_Tp> __newval = __oldval - __i;
1246 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1247 memory_order_relaxed))
1248 __newval = __oldval - __i;
1253 template<
typename _Tp>
1254 concept __atomic_add_fetchable
1255 =
requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
1257 template<
typename _Tp>
1259 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i)
noexcept
1261 if constexpr (__atomic_add_fetchable<_Tp>)
1262 return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1265 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1266 _Val<_Tp> __newval = __oldval + __i;
1267 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1268 memory_order_seq_cst,
1269 memory_order_relaxed))
1270 __newval = __oldval + __i;
1275 template<
typename _Tp>
1276 concept __atomic_sub_fetchable
1277 =
requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
1279 template<
typename _Tp>
1281 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i)
noexcept
1283 if constexpr (__atomic_sub_fetchable<_Tp>)
1284 return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1287 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1288 _Val<_Tp> __newval = __oldval - __i;
1289 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1290 memory_order_seq_cst,
1291 memory_order_relaxed))
1292 __newval = __oldval - __i;
// __atomic_float<_Fp>: implementation backing std::atomic<floating-point>.
// Everything delegates to the __atomic_impl free functions above.
// NOTE(review): braces/return-type lines are missing in places in this
// extraction; the embedded original numbering jumps.
1299 template<
typename _Fp>
1300 struct __atomic_float
1302 static_assert(is_floating_point_v<_Fp>);
1304 static constexpr size_t _S_alignment = __alignof__(_Fp);
1307 using value_type = _Fp;
1308 using difference_type = value_type;
1310 static constexpr bool is_always_lock_free
1311 = __atomic_always_lock_free(
sizeof(_Fp), 0);
1313 __atomic_float() =
default;
// Value constructor clears padding (except during constant evaluation,
// where padding cannot matter).
1316 __atomic_float(_Fp __t) : _M_fp(__t)
1318 if (!std::__is_constant_evaluated())
1319 __atomic_impl::__clear_padding(_M_fp);
1322 __atomic_float(
const __atomic_float&) =
delete;
1323 __atomic_float& operator=(
const __atomic_float&) =
delete;
1324 __atomic_float& operator=(
const __atomic_float&)
volatile =
delete;
1327 operator=(_Fp __t)
volatile noexcept
1334 operator=(_Fp __t)
noexcept
1341 is_lock_free() const volatile noexcept
1342 {
return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1345 is_lock_free() const noexcept
1346 {
return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1349 store(_Fp __t, memory_order __m = memory_order_seq_cst)
volatile noexcept
1350 { __atomic_impl::store(&_M_fp, __t, __m); }
1353 store(_Fp __t, memory_order __m = memory_order_seq_cst)
noexcept
1354 { __atomic_impl::store(&_M_fp, __t, __m); }
1357 load(memory_order __m = memory_order_seq_cst)
const volatile noexcept
1358 {
return __atomic_impl::load(&_M_fp, __m); }
1361 load(memory_order __m = memory_order_seq_cst)
const noexcept
1362 {
return __atomic_impl::load(&_M_fp, __m); }
1364 operator _Fp() const volatile noexcept {
return this->load(); }
1365 operator _Fp() const noexcept {
return this->load(); }
1368 exchange(_Fp __desired,
1369 memory_order __m = memory_order_seq_cst)
volatile noexcept
1370 {
return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1373 exchange(_Fp __desired,
1374 memory_order __m = memory_order_seq_cst)
noexcept
1375 {
return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1378 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1379 memory_order __success,
1380 memory_order __failure)
noexcept
1382 return __atomic_impl::compare_exchange_weak(&_M_fp,
1383 __expected, __desired,
1384 __success, __failure);
1388 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1389 memory_order __success,
1390 memory_order __failure)
volatile noexcept
1392 return __atomic_impl::compare_exchange_weak(&_M_fp,
1393 __expected, __desired,
1394 __success, __failure);
1398 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1399 memory_order __success,
1400 memory_order __failure)
noexcept
1402 return __atomic_impl::compare_exchange_strong(&_M_fp,
1403 __expected, __desired,
1404 __success, __failure);
1408 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1409 memory_order __success,
1410 memory_order __failure)
volatile noexcept
1412 return __atomic_impl::compare_exchange_strong(&_M_fp,
1413 __expected, __desired,
1414 __success, __failure);
// Single-order overloads derive the failure order from the success order.
1418 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1419 memory_order __order = memory_order_seq_cst)
1422 return compare_exchange_weak(__expected, __desired, __order,
1423 __cmpexch_failure_order(__order));
1427 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1428 memory_order __order = memory_order_seq_cst)
1431 return compare_exchange_weak(__expected, __desired, __order,
1432 __cmpexch_failure_order(__order));
1436 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1437 memory_order __order = memory_order_seq_cst)
1440 return compare_exchange_strong(__expected, __desired, __order,
1441 __cmpexch_failure_order(__order));
1445 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1446 memory_order __order = memory_order_seq_cst)
1449 return compare_exchange_strong(__expected, __desired, __order,
1450 __cmpexch_failure_order(__order));
1453#if __glibcxx_atomic_wait
1454 _GLIBCXX_ALWAYS_INLINE
void
1455 wait(_Fp __old, memory_order __m = memory_order_seq_cst)
const noexcept
1456 { __atomic_impl::wait(&_M_fp, __old, __m); }
1460 _GLIBCXX_ALWAYS_INLINE
void
1461 notify_one() const noexcept
1462 { __atomic_impl::notify_one(&_M_fp); }
1466 _GLIBCXX_ALWAYS_INLINE
void
1467 notify_all() const noexcept
1468 { __atomic_impl::notify_all(&_M_fp); }
// Arithmetic goes through the *_flt helpers (builtin or CAS-loop fallback).
1474 fetch_add(value_type __i,
1475 memory_order __m = memory_order_seq_cst)
noexcept
1476 {
return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1479 fetch_add(value_type __i,
1480 memory_order __m = memory_order_seq_cst)
volatile noexcept
1481 {
return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1484 fetch_sub(value_type __i,
1485 memory_order __m = memory_order_seq_cst)
noexcept
1486 {
return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1489 fetch_sub(value_type __i,
1490 memory_order __m = memory_order_seq_cst)
volatile noexcept
1491 {
return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1494 operator+=(value_type __i)
noexcept
1495 {
return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1498 operator+=(value_type __i)
volatile noexcept
1499 {
return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1502 operator-=(value_type __i)
noexcept
1503 {
return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1506 operator-=(value_type __i)
volatile noexcept
1507 {
return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1510 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
// _GLIBCXX20_INIT is local to this header; undefine once done with it.
1512#undef _GLIBCXX20_INIT
1514 template<
typename _Tp,
1515 bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
1516 bool = is_floating_point_v<_Tp>>
1517 struct __atomic_ref;
1520 template<
typename _Tp>
1521 struct __atomic_ref<_Tp, false, false>
1523 static_assert(is_trivially_copyable_v<_Tp>);
1526 static constexpr int _S_min_alignment
1527 = (
sizeof(_Tp) & (
sizeof(_Tp) - 1)) ||
sizeof(_Tp) > 16
1531 using value_type = _Tp;
1533 static constexpr bool is_always_lock_free
1534 = __atomic_always_lock_free(
sizeof(_Tp), 0);
1536 static constexpr size_t required_alignment
1537 = _S_min_alignment >
alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1539 __atomic_ref& operator=(
const __atomic_ref&) =
delete;
1542 __atomic_ref(_Tp& __t) : _M_ptr(std::
__addressof(__t))
1544 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1547 __atomic_ref(
const __atomic_ref&)
noexcept =
default;
1550 operator=(_Tp __t)
const noexcept
1556 operator _Tp() const noexcept {
return this->load(); }
1559 is_lock_free() const noexcept
1560 {
return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1563 store(_Tp __t, memory_order __m = memory_order_seq_cst)
const noexcept
1564 { __atomic_impl::store(_M_ptr, __t, __m); }
1567 load(memory_order __m = memory_order_seq_cst)
const noexcept
1568 {
return __atomic_impl::load(_M_ptr, __m); }
1571 exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1573 {
return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1576 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1577 memory_order __success,
1578 memory_order __failure)
const noexcept
1580 return __atomic_impl::compare_exchange_weak<true>(
1581 _M_ptr, __expected, __desired, __success, __failure);
1585 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1586 memory_order __success,
1587 memory_order __failure)
const noexcept
1589 return __atomic_impl::compare_exchange_strong<true>(
1590 _M_ptr, __expected, __desired, __success, __failure);
1594 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1595 memory_order __order = memory_order_seq_cst)
1598 return compare_exchange_weak(__expected, __desired, __order,
1599 __cmpexch_failure_order(__order));
1603 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1604 memory_order __order = memory_order_seq_cst)
1607 return compare_exchange_strong(__expected, __desired, __order,
1608 __cmpexch_failure_order(__order));
1611#if __glibcxx_atomic_wait
1612 _GLIBCXX_ALWAYS_INLINE
void
1613 wait(_Tp __old, memory_order __m = memory_order_seq_cst)
const noexcept
1614 { __atomic_impl::wait(_M_ptr, __old, __m); }
1618 _GLIBCXX_ALWAYS_INLINE
void
1619 notify_one() const noexcept
1620 { __atomic_impl::notify_one(_M_ptr); }
1624 _GLIBCXX_ALWAYS_INLINE
void
1625 notify_all() const noexcept
1626 { __atomic_impl::notify_all(_M_ptr); }
1636 template<
typename _Tp>
1637 struct __atomic_ref<_Tp, true, false>
1639 static_assert(is_integral_v<_Tp>);
1642 using value_type = _Tp;
1643 using difference_type = value_type;
1645 static constexpr bool is_always_lock_free
1646 = __atomic_always_lock_free(
sizeof(_Tp), 0);
1648 static constexpr size_t required_alignment
1649 =
sizeof(_Tp) >
alignof(_Tp) ?
sizeof(_Tp) : alignof(_Tp);
1651 __atomic_ref() =
delete;
1652 __atomic_ref& operator=(
const __atomic_ref&) =
delete;
1655 __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1657 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1660 __atomic_ref(
const __atomic_ref&)
noexcept =
default;
1663 operator=(_Tp __t)
const noexcept
1669 operator _Tp() const noexcept {
return this->load(); }
1672 is_lock_free() const noexcept
1674 return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1678 store(_Tp __t, memory_order __m = memory_order_seq_cst)
const noexcept
1679 { __atomic_impl::store(_M_ptr, __t, __m); }
1682 load(memory_order __m = memory_order_seq_cst)
const noexcept
1683 {
return __atomic_impl::load(_M_ptr, __m); }
1686 exchange(_Tp __desired,
1687 memory_order __m = memory_order_seq_cst)
const noexcept
1688 {
return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1691 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1692 memory_order __success,
1693 memory_order __failure)
const noexcept
1695 return __atomic_impl::compare_exchange_weak<true>(
1696 _M_ptr, __expected, __desired, __success, __failure);
1700 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1701 memory_order __success,
1702 memory_order __failure)
const noexcept
1704 return __atomic_impl::compare_exchange_strong<true>(
1705 _M_ptr, __expected, __desired, __success, __failure);
1709 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1710 memory_order __order = memory_order_seq_cst)
1713 return compare_exchange_weak(__expected, __desired, __order,
1714 __cmpexch_failure_order(__order));
1718 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1719 memory_order __order = memory_order_seq_cst)
1722 return compare_exchange_strong(__expected, __desired, __order,
1723 __cmpexch_failure_order(__order));
1726#if __glibcxx_atomic_wait
1727 _GLIBCXX_ALWAYS_INLINE
void
1728 wait(_Tp __old, memory_order __m = memory_order_seq_cst)
const noexcept
1729 { __atomic_impl::wait(_M_ptr, __old, __m); }
1733 _GLIBCXX_ALWAYS_INLINE
void
1734 notify_one() const noexcept
1735 { __atomic_impl::notify_one(_M_ptr); }
1739 _GLIBCXX_ALWAYS_INLINE
void
1740 notify_all() const noexcept
1741 { __atomic_impl::notify_all(_M_ptr); }
1747 fetch_add(value_type __i,
1748 memory_order __m = memory_order_seq_cst)
const noexcept
1749 {
return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1752 fetch_sub(value_type __i,
1753 memory_order __m = memory_order_seq_cst)
const noexcept
1754 {
return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1757 fetch_and(value_type __i,
1758 memory_order __m = memory_order_seq_cst)
const noexcept
1759 {
return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1762 fetch_or(value_type __i,
1763 memory_order __m = memory_order_seq_cst)
const noexcept
1764 {
return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1767 fetch_xor(value_type __i,
1768 memory_order __m = memory_order_seq_cst)
const noexcept
1769 {
return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1771 _GLIBCXX_ALWAYS_INLINE value_type
1772 operator++(
int)
const noexcept
1773 {
return fetch_add(1); }
1775 _GLIBCXX_ALWAYS_INLINE value_type
1776 operator--(
int)
const noexcept
1777 {
return fetch_sub(1); }
1780 operator++() const noexcept
1781 {
return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1784 operator--() const noexcept
1785 {
return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1788 operator+=(value_type __i)
const noexcept
1789 {
return __atomic_impl::__add_fetch(_M_ptr, __i); }
1792 operator-=(value_type __i)
const noexcept
1793 {
return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1796 operator&=(value_type __i)
const noexcept
1797 {
return __atomic_impl::__and_fetch(_M_ptr, __i); }
1800 operator|=(value_type __i)
const noexcept
1801 {
return __atomic_impl::__or_fetch(_M_ptr, __i); }
1804 operator^=(value_type __i)
const noexcept
1805 {
return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1812 template<
typename _Fp>
1813 struct __atomic_ref<_Fp, false, true>
1815 static_assert(is_floating_point_v<_Fp>);
1818 using value_type = _Fp;
1819 using difference_type = value_type;
1821 static constexpr bool is_always_lock_free
1822 = __atomic_always_lock_free(
sizeof(_Fp), 0);
1824 static constexpr size_t required_alignment = __alignof__(_Fp);
1826 __atomic_ref() =
delete;
1827 __atomic_ref& operator=(
const __atomic_ref&) =
delete;
1830 __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1832 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1835 __atomic_ref(
const __atomic_ref&)
noexcept =
default;
1838 operator=(_Fp __t)
const noexcept
1844 operator _Fp() const noexcept {
return this->load(); }
1847 is_lock_free() const noexcept
1849 return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1853 store(_Fp __t, memory_order __m = memory_order_seq_cst)
const noexcept
1854 { __atomic_impl::store(_M_ptr, __t, __m); }
1857 load(memory_order __m = memory_order_seq_cst)
const noexcept
1858 {
return __atomic_impl::load(_M_ptr, __m); }
1861 exchange(_Fp __desired,
1862 memory_order __m = memory_order_seq_cst)
const noexcept
1863 {
return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1866 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1867 memory_order __success,
1868 memory_order __failure)
const noexcept
1870 return __atomic_impl::compare_exchange_weak<true>(
1871 _M_ptr, __expected, __desired, __success, __failure);
1875 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1876 memory_order __success,
1877 memory_order __failure)
const noexcept
1879 return __atomic_impl::compare_exchange_strong<true>(
1880 _M_ptr, __expected, __desired, __success, __failure);
1884 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1885 memory_order __order = memory_order_seq_cst)
1888 return compare_exchange_weak(__expected, __desired, __order,
1889 __cmpexch_failure_order(__order));
1893 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1894 memory_order __order = memory_order_seq_cst)
1897 return compare_exchange_strong(__expected, __desired, __order,
1898 __cmpexch_failure_order(__order));
1901#if __glibcxx_atomic_wait
1902 _GLIBCXX_ALWAYS_INLINE
void
1903 wait(_Fp __old, memory_order __m = memory_order_seq_cst)
const noexcept
1904 { __atomic_impl::wait(_M_ptr, __old, __m); }
1908 _GLIBCXX_ALWAYS_INLINE
void
1909 notify_one() const noexcept
1910 { __atomic_impl::notify_one(_M_ptr); }
1914 _GLIBCXX_ALWAYS_INLINE
void
1915 notify_all() const noexcept
1916 { __atomic_impl::notify_all(_M_ptr); }
1922 fetch_add(value_type __i,
1923 memory_order __m = memory_order_seq_cst)
const noexcept
1924 {
return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1927 fetch_sub(value_type __i,
1928 memory_order __m = memory_order_seq_cst)
const noexcept
1929 {
return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1932 operator+=(value_type __i)
const noexcept
1933 {
return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1936 operator-=(value_type __i)
const noexcept
1937 {
return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1944 template<
typename _Tp>
1945 struct __atomic_ref<_Tp*,
false,
false>
1951 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1953 static constexpr size_t required_alignment = __alignof__(_Tp*);
1955 __atomic_ref() =
delete;
1956 __atomic_ref& operator=(
const __atomic_ref&) =
delete;
1959 __atomic_ref(_Tp*& __t) : _M_ptr(std::
__addressof(__t))
1961 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1964 __atomic_ref(
const __atomic_ref&)
noexcept =
default;
1967 operator=(_Tp* __t)
const noexcept
1973 operator _Tp*()
const noexcept {
return this->load(); }
1976 is_lock_free() const noexcept
1978 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1982 store(_Tp* __t,
memory_order __m = memory_order_seq_cst)
const noexcept
1983 { __atomic_impl::store(_M_ptr, __t, __m); }
1986 load(
memory_order __m = memory_order_seq_cst)
const noexcept
1987 {
return __atomic_impl::load(_M_ptr, __m); }
1990 exchange(_Tp* __desired,
1991 memory_order __m = memory_order_seq_cst)
const noexcept
1992 {
return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1995 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1999 return __atomic_impl::compare_exchange_weak<true>(
2000 _M_ptr, __expected, __desired, __success, __failure);
2004 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2008 return __atomic_impl::compare_exchange_strong<true>(
2009 _M_ptr, __expected, __desired, __success, __failure);
2013 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
2017 return compare_exchange_weak(__expected, __desired, __order,
2018 __cmpexch_failure_order(__order));
2022 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2026 return compare_exchange_strong(__expected, __desired, __order,
2027 __cmpexch_failure_order(__order));
2030#if __glibcxx_atomic_wait
2031 _GLIBCXX_ALWAYS_INLINE
void
2032 wait(_Tp* __old,
memory_order __m = memory_order_seq_cst)
const noexcept
2033 { __atomic_impl::wait(_M_ptr, __old, __m); }
2037 _GLIBCXX_ALWAYS_INLINE
void
2038 notify_one() const noexcept
2039 { __atomic_impl::notify_one(_M_ptr); }
2043 _GLIBCXX_ALWAYS_INLINE
void
2044 notify_all() const noexcept
2045 { __atomic_impl::notify_all(_M_ptr); }
2052 memory_order __m = memory_order_seq_cst)
const noexcept
2053 {
return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2057 memory_order __m = memory_order_seq_cst)
const noexcept
2058 {
return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2061 operator++(
int)
const noexcept
2062 {
return fetch_add(1); }
2065 operator--(
int)
const noexcept
2066 {
return fetch_sub(1); }
2069 operator++() const noexcept
2071 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2075 operator--() const noexcept
2077 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2083 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2089 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2093 static constexpr ptrdiff_t
2094 _S_type_size(ptrdiff_t __d)
noexcept
2096 static_assert(is_object_v<_Tp>);
2097 return __d *
sizeof(_Tp);
2108_GLIBCXX_END_NAMESPACE_VERSION
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
_Tp kill_dependency(_Tp __y) noexcept
kill_dependency
memory_order
Enumeration for memory_order.
ISO C++ entities toplevel namespace is std.
constexpr bitset< _Nb > operator|(const bitset< _Nb > &__x, const bitset< _Nb > &__y) noexcept
Global bitwise operations on bitsets.
constexpr bitset< _Nb > operator&(const bitset< _Nb > &__x, const bitset< _Nb > &__y) noexcept
Global bitwise operations on bitsets.