// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header

_GLIBCXX_BEGIN_NAMESPACE(std)

  // 0 == __atomic0 == Never lock-free
namespace __atomic0
{
  _GLIBCXX_BEGIN_EXTERN_C

  void
  atomic_flag_clear_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  void
  __atomic_flag_wait_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  _GLIBCXX_CONST __atomic_flag_base*
  __atomic_flag_for_address(const volatile void* __z) _GLIBCXX_NOTHROW;

  _GLIBCXX_END_EXTERN_C
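
  // Every operation in this file follows the same lock-based path: look up
  // the guard flag associated with the object's address via
  // __atomic_flag_for_address, spin in __atomic_flag_wait_explicit until the
  // flag is owned, perform the operation on the plain member, then release
  // the flag with atomic_flag_clear_explicit. The address-to-flag mapping is
  // supplied by the library runtime and is not visible in this header.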

  // Implementation specific defines.
#define _ATOMIC_MEMBER_ _M_i

  // Lock-based implementations of the atomic operations, written as
  // GNU statement expressions.
#define _ATOMIC_LOAD_(__a, __x) \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
    __i_type* __p = &_ATOMIC_MEMBER_; \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
    __atomic_flag_wait_explicit(__g, __x); \
    __i_type __r = *__p; \
    atomic_flag_clear_explicit(__g, __x); \
    __r; })

#define _ATOMIC_STORE_(__a, __m, __x) \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
    __i_type* __p = &_ATOMIC_MEMBER_; \
    __typeof__(__m) __v = (__m); \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
    __atomic_flag_wait_explicit(__g, __x); \
    *__p = __v; \
    atomic_flag_clear_explicit(__g, __x); \
    __v; })

#define _ATOMIC_MODIFY_(__a, __o, __m, __x) \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
    __i_type* __p = &_ATOMIC_MEMBER_; \
    __typeof__(__m) __v = (__m); \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
    __atomic_flag_wait_explicit(__g, __x); \
    __i_type __r = *__p; \
    *__p __o __v; \
    atomic_flag_clear_explicit(__g, __x); \
    __r; })

#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x) \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
    __i_type* __p = &_ATOMIC_MEMBER_; \
    __typeof__(__e) __q = (__e); \
    __typeof__(__m) __v = (__m); \
    bool __r; \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
    __atomic_flag_wait_explicit(__g, __x); \
    __i_type __t = *__p; \
    if (*__q == __t) \
      { \
        *__p = (__i_type)__v; \
        __r = true; \
      } \
    else { *__q = __t; __r = false; } \
    atomic_flag_clear_explicit(__g, __x); \
    __r; })
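
  // _ATOMIC_CMPEXCHNG_ stores the new value only when the expected value
  // (reached through __e) matches the current one; otherwise it writes the
  // value it observed back through __e, which is how the compare_exchange_*
  // members below hand their callers the up-to-date value on failure.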


  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst);

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst);

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };
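
  // A minimal usage sketch (names are illustrative only, and assume the
  // enclosing <atomic> header exposes this implementation as
  // std::atomic_flag): the flag doubles as a simple spin lock.
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void with_lock()
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       { }                                    // spin until we own the flag
  //     // ... critical section ...
  //     lock.clear(std::memory_order_release);   // release for the next owner
  //   }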


  /// atomic_address
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) volatile = delete;

    constexpr atomic_address(void* __v): _M_i (__v) { }

    bool
    is_lock_free() const { return false; }

    bool
    is_lock_free() const volatile { return false; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst)
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    operator void*() const
    { return load(); }

    operator void*() const volatile
    { return load(); }

    // XXX
    void*
    operator=(void* __v)
    {
      store(__v);
      return __v;
    }

    void*
    operator=(void* __v) volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d)
    { return fetch_add(__d) + __d; }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d)
    { return fetch_sub(__d) - __d; }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }
  };
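
  // A minimal usage sketch (names are illustrative only, and assume the
  // enclosing <atomic> header exposes this type as std::atomic_address,
  // per the C++0x working draft):
  //
  //   static int node;
  //   std::atomic_address head(0);
  //   void* expected = head.load();
  //   while (!head.compare_exchange_weak(expected, &node))
  //     { }   // on failure, expected is refreshed with the observed value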


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      __int_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }

      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return fetch_add(1) + 1; }

      __int_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __int_type
      operator--()
      { return fetch_sub(1) - 1; }

      __int_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __int_type
      operator+=(__int_type __i)
      { return fetch_add(__i) + __i; }

      __int_type
      operator+=(__int_type __i) volatile
      { return fetch_add(__i) + __i; }

      __int_type
      operator-=(__int_type __i)
      { return fetch_sub(__i) - __i; }

      __int_type
      operator-=(__int_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __int_type
      operator&=(__int_type __i)
      { return fetch_and(__i) & __i; }

      __int_type
      operator&=(__int_type __i) volatile
      { return fetch_and(__i) & __i; }

      __int_type
      operator|=(__int_type __i)
      { return fetch_or(__i) | __i; }

      __int_type
      operator|=(__int_type __i) volatile
      { return fetch_or(__i) | __i; }

      __int_type
      operator^=(__int_type __i)
      { return fetch_xor(__i) ^ __i; }

      __int_type
      operator^=(__int_type __i) volatile
      { return fetch_xor(__i) ^ __i; }

      bool
      is_lock_free() const
      { return false; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
        _ATOMIC_STORE_(this, __i, __m);
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
        _ATOMIC_STORE_(this, __i, __m);
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
        return _ATOMIC_LOAD_(this, __m);
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
        return _ATOMIC_LOAD_(this, __m);
      }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };
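
  // A minimal usage sketch (assuming the enclosing <atomic> header defines
  // atomic_int in terms of __atomic_base<int>, as the C++0x draft specifies):
  //
  //   std::atomic_int counter(0);
  //   counter.fetch_add(1, std::memory_order_relaxed);  // returns old value
  //   ++counter;                       // operator++ yields fetch_add(1) + 1
  //   int snapshot = counter.load();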

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

_GLIBCXX_END_NAMESPACE

#endif