libstdc++
stop_token
1// <stop_token> -*- C++ -*-
2
3// Copyright (C) 2019-2022 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/stop_token
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_STOP_TOKEN
30#define _GLIBCXX_STOP_TOKEN
31
32#include <bits/requires_hosted.h> // concurrency
33
34#if __cplusplus > 201703L
35
36#include <atomic>
37#include <bits/std_thread.h>
38
39#include <semaphore>
40
41#define __cpp_lib_jthread 201911L
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /// Tag type indicating a stop_source should have no shared-stop-state.
48 struct nostopstate_t { explicit nostopstate_t() = default; };
49 inline constexpr nostopstate_t nostopstate{};
50
51 class stop_source;
52
53 /// Allow testing whether a stop request has been made on a `stop_source`.
54 class stop_token
55 {
56 public:
57 stop_token() noexcept = default;
58
59 stop_token(const stop_token&) noexcept = default;
60 stop_token(stop_token&&) noexcept = default;
61
62 ~stop_token() = default;
63
64 stop_token&
65 operator=(const stop_token&) noexcept = default;
66
67 stop_token&
68 operator=(stop_token&&) noexcept = default;
69
70 [[nodiscard]]
71 bool
72 stop_possible() const noexcept
73 {
74 return static_cast<bool>(_M_state) && _M_state->_M_stop_possible();
75 }
76
77 [[nodiscard]]
78 bool
79 stop_requested() const noexcept
80 {
81 return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
82 }
83
84 void
85 swap(stop_token& __rhs) noexcept
86 { _M_state.swap(__rhs._M_state); }
87
88 [[nodiscard]]
89 friend bool
90 operator==(const stop_token& __a, const stop_token& __b)
91 { return __a._M_state == __b._M_state; }
92
93 friend void
94 swap(stop_token& __lhs, stop_token& __rhs) noexcept
95 { __lhs.swap(__rhs); }
96
97 private:
98 friend class stop_source;
99 template<typename _Callback>
100 friend class stop_callback;
101
102 static void
103 _S_yield() noexcept
104 {
105#if defined __i386__ || defined __x86_64__
106 __builtin_ia32_pause();
107#endif
108 this_thread::yield();
109 }
110
111#ifndef __cpp_lib_semaphore
112 struct binary_semaphore
113 {
114 explicit binary_semaphore(int __d) : _M_counter(__d > 0) { }
115
116 void release() { _M_counter.fetch_add(1, memory_order::release); }
117
118 void acquire()
119 {
120 int __old = 1;
121 while (!_M_counter.compare_exchange_weak(__old, 0,
122 memory_order::acquire,
123 memory_order::relaxed))
124 {
125 __old = 1;
126 _S_yield();
127 }
128 }
129
130 atomic<int> _M_counter;
131 };
132#endif
133
134 struct _Stop_cb
135 {
136 using __cb_type = void(_Stop_cb*) noexcept;
137 __cb_type* _M_callback;
138 _Stop_cb* _M_prev = nullptr;
139 _Stop_cb* _M_next = nullptr;
140 bool* _M_destroyed = nullptr;
141 binary_semaphore _M_done{0};
142
143 [[__gnu__::__nonnull__]]
144 explicit
145 _Stop_cb(__cb_type* __cb)
146 : _M_callback(__cb)
147 { }
148
149 void _M_run() noexcept { _M_callback(this); }
150 };
151
152 struct _Stop_state_t
153 {
154 using value_type = uint32_t;
155 static constexpr value_type _S_stop_requested_bit = 1;
156 static constexpr value_type _S_locked_bit = 2;
157 static constexpr value_type _S_ssrc_counter_inc = 4;
158
159 std::atomic<value_type> _M_owners{1};
160 std::atomic<value_type> _M_value{_S_ssrc_counter_inc};
161 _Stop_cb* _M_head = nullptr;
162 std::thread::id _M_requester;
163
164 _Stop_state_t() = default;
165
166 bool
167 _M_stop_possible() noexcept
168 {
169 // true if a stop request has already been made or there are still
170 // stop_source objects that would allow one to be made.
171 return _M_value.load(memory_order::acquire) & ~_S_locked_bit;
172 }
173
174 bool
175 _M_stop_requested() noexcept
176 {
177 return _M_value.load(memory_order::acquire) & _S_stop_requested_bit;
178 }
179
180 void
181 _M_add_owner() noexcept
182 {
183 _M_owners.fetch_add(1, memory_order::relaxed);
184 }
185
186 void
187 _M_release_ownership() noexcept
188 {
189 if (_M_owners.fetch_sub(1, memory_order::acq_rel) == 1)
190 delete this;
191 }
192
193 void
194 _M_add_ssrc() noexcept
195 {
196 _M_value.fetch_add(_S_ssrc_counter_inc, memory_order::relaxed);
197 }
198
199 void
200 _M_sub_ssrc() noexcept
201 {
202 _M_value.fetch_sub(_S_ssrc_counter_inc, memory_order::release);
203 }
204
205 // Obtain lock.
206 void
207 _M_lock() noexcept
208 {
209 // Can use relaxed loads to get the current value.
210 // The successful call to _M_try_lock is an acquire operation.
211 auto __old = _M_value.load(memory_order::relaxed);
212 while (!_M_try_lock(__old, memory_order::relaxed))
213 { }
214 }
215
216 // Precondition: calling thread holds the lock.
217 void
218 _M_unlock() noexcept
219 {
220 _M_value.fetch_sub(_S_locked_bit, memory_order::release);
221 }
222
223 bool
224 _M_request_stop() noexcept
225 {
226 // obtain lock and set stop_requested bit
227 auto __old = _M_value.load(memory_order::acquire);
228 do
229 {
230 if (__old & _S_stop_requested_bit) // stop request already made
231 return false;
232 }
233 while (!_M_try_lock_and_stop(__old));
234
235 _M_requester = this_thread::get_id();
236
237 while (_M_head)
238 {
239 bool __last_cb;
240 _Stop_cb* __cb = _M_head;
241 _M_head = _M_head->_M_next;
242 if (_M_head)
243 {
244 _M_head->_M_prev = nullptr;
245 __last_cb = false;
246 }
247 else
248 __last_cb = true;
249
250 // Allow other callbacks to be unregistered while __cb runs.
251 _M_unlock();
252
253 bool __destroyed = false;
254 __cb->_M_destroyed = &__destroyed;
255
256 // run callback
257 __cb->_M_run();
258
259 if (!__destroyed)
260 {
261 __cb->_M_destroyed = nullptr;
262
263 // synchronize with destructor of stop_callback that owns *__cb
264 if (!__gnu_cxx::__is_single_threaded())
265 __cb->_M_done.release();
266 }
267
268 // Avoid relocking if we already know there are no more callbacks.
269 if (__last_cb)
270 return true;
271
272 _M_lock();
273 }
274
275 _M_unlock();
276 return true;
277 }
278
279 [[__gnu__::__nonnull__]]
280 bool
281 _M_register_callback(_Stop_cb* __cb) noexcept
282 {
283 auto __old = _M_value.load(memory_order::acquire);
284 do
285 {
286 if (__old & _S_stop_requested_bit) // stop request already made
287 {
288 __cb->_M_run(); // run synchronously
289 return false;
290 }
291
292 if (__old < _S_ssrc_counter_inc) // no stop_source owns *this
293 // No need to register callback if no stop request can be made.
294 // Returning false also means the stop_callback does not share
295 // ownership of this state, but that's not observable.
296 return false;
297 }
298 while (!_M_try_lock(__old));
299
300 __cb->_M_next = _M_head;
301 if (_M_head)
302 {
303 _M_head->_M_prev = __cb;
304 }
305 _M_head = __cb;
306 _M_unlock();
307 return true;
308 }
309
310 // Called by ~stop_callback just before destroying *__cb.
311 [[__gnu__::__nonnull__]]
312 void
313 _M_remove_callback(_Stop_cb* __cb)
314 {
315 _M_lock();
316
317 if (__cb == _M_head)
318 {
319 _M_head = _M_head->_M_next;
320 if (_M_head)
321 _M_head->_M_prev = nullptr;
322 _M_unlock();
323 return;
324 }
325 else if (__cb->_M_prev)
326 {
327 __cb->_M_prev->_M_next = __cb->_M_next;
328 if (__cb->_M_next)
329 __cb->_M_next->_M_prev = __cb->_M_prev;
330 _M_unlock();
331 return;
332 }
333
334 _M_unlock();
335
336 // Callback is not in the list, so must have been removed by a call to
337 // _M_request_stop.
338
339 // Despite appearances there is no data race on _M_requester. The only
340 // write to it happens before the callback is removed from the list,
341 // and removing it from the list happens before this read.
342 if (!(_M_requester == this_thread::get_id()))
343 {
344 // Synchronize with completion of callback.
345 __cb->_M_done.acquire();
346 // Safe for ~stop_callback to destroy *__cb now.
347 return;
348 }
349
350 if (__cb->_M_destroyed)
351 *__cb->_M_destroyed = true;
352 }
353
354 // Try to obtain the lock.
355 // Returns true if the lock is acquired (with memory order acquire).
356 // Otherwise, sets __curval = _M_value.load(__failure) and returns false.
357 // Might fail spuriously, so must be called in a loop.
358 bool
359 _M_try_lock(value_type& __curval,
360 memory_order __failure = memory_order::acquire) noexcept
361 {
362 return _M_do_try_lock(__curval, 0, memory_order::acquire, __failure);
363 }
364
365 // Try to obtain the lock to make a stop request.
366 // Returns true if the lock is acquired and the _S_stop_requested_bit is
367 // set (with memory order acq_rel so that other threads see the request).
368 // Otherwise, sets __curval = _M_value.load(memory_order::acquire) and
369 // returns false.
370 // Might fail spuriously, so must be called in a loop.
371 bool
372 _M_try_lock_and_stop(value_type& __curval) noexcept
373 {
374 return _M_do_try_lock(__curval, _S_stop_requested_bit,
375 memory_order::acq_rel, memory_order::acquire);
376 }
377
378 bool
379 _M_do_try_lock(value_type& __curval, value_type __newbits,
380 memory_order __success, memory_order __failure) noexcept
381 {
382 if (__curval & _S_locked_bit)
383 {
384 _S_yield();
385 __curval = _M_value.load(__failure);
386 return false;
387 }
388 __newbits |= _S_locked_bit;
389 return _M_value.compare_exchange_weak(__curval, __curval | __newbits,
390 __success, __failure);
391 }
392 };
393
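The packed _M_value word above keeps the stop-requested flag in bit 0, a spin-lock flag in bit 1, and the count of owning stop_source objects in the remaining bits (each source adds _S_ssrc_counter_inc, i.e. 4). A small illustrative calculation, with the private constants mirrored here purely for the example:

  #include <cstdint>

  // Mirrors of the private _Stop_state_t constants, for illustration only.
  constexpr std::uint32_t stop_requested_bit = 1; // bit 0
  constexpr std::uint32_t locked_bit         = 2; // bit 1
  constexpr std::uint32_t ssrc_counter_inc   = 4; // each stop_source adds 4

  // Example: two stop_source objects share the state and a stop was requested.
  constexpr std::uint32_t example = 2 * ssrc_counter_inc | stop_requested_bit; // == 9
  static_assert(example & stop_requested_bit);    // _M_stop_requested() is true
  static_assert(example & ~locked_bit);           // _M_stop_possible() is true
  static_assert(example / ssrc_counter_inc == 2); // two owning stop_source objects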
394 struct _Stop_state_ref
395 {
396 _Stop_state_ref() = default;
397
398 explicit
399 _Stop_state_ref(const stop_source&)
400 : _M_ptr(new _Stop_state_t())
401 { }
402
403 _Stop_state_ref(const _Stop_state_ref& __other) noexcept
404 : _M_ptr(__other._M_ptr)
405 {
406 if (_M_ptr)
407 _M_ptr->_M_add_owner();
408 }
409
410 _Stop_state_ref(_Stop_state_ref&& __other) noexcept
411 : _M_ptr(__other._M_ptr)
412 {
413 __other._M_ptr = nullptr;
414 }
415
416 _Stop_state_ref&
417 operator=(const _Stop_state_ref& __other) noexcept
418 {
419 if (auto __ptr = __other._M_ptr; __ptr != _M_ptr)
420 {
421 if (__ptr)
422 __ptr->_M_add_owner();
423 if (_M_ptr)
424 _M_ptr->_M_release_ownership();
425 _M_ptr = __ptr;
426 }
427 return *this;
428 }
429
430 _Stop_state_ref&
431 operator=(_Stop_state_ref&& __other) noexcept
432 {
433 _Stop_state_ref(std::move(__other)).swap(*this);
434 return *this;
435 }
436
437 ~_Stop_state_ref()
438 {
439 if (_M_ptr)
440 _M_ptr->_M_release_ownership();
441 }
442
443 void
444 swap(_Stop_state_ref& __other) noexcept
445 { std::swap(_M_ptr, __other._M_ptr); }
446
447 explicit operator bool() const noexcept { return _M_ptr != nullptr; }
448
449 _Stop_state_t* operator->() const noexcept { return _M_ptr; }
450
451#if __cpp_impl_three_way_comparison >= 201907L
452 friend bool
453 operator==(const _Stop_state_ref&, const _Stop_state_ref&) = default;
454#else
455 friend bool
456 operator==(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
457 noexcept
458 { return __lhs._M_ptr == __rhs._M_ptr; }
459
460 friend bool
461 operator!=(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
462 noexcept
463 { return __lhs._M_ptr != __rhs._M_ptr; }
464#endif
465
466 private:
467 _Stop_state_t* _M_ptr = nullptr;
468 };
469
470 _Stop_state_ref _M_state;
471
472 explicit
473 stop_token(const _Stop_state_ref& __state) noexcept
474 : _M_state{__state}
475 { }
476 };
477
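A stop_token only observes stop state shared with some stop_source; the usual pattern is to poll it from a worker. A minimal sketch (the function name and loop body are illustrative, not part of this header):

  #include <stop_token>

  void process_work(std::stop_token tok)
  {
    while (!tok.stop_requested())
      {
        if (!tok.stop_possible())
          return; // no stop_source remains, so a request can never arrive
        // ... perform one unit of work ...
      }
  }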
478 /// A type that allows a stop request to be made.
479 class stop_source
480 {
481 public:
482 stop_source() : _M_state(*this)
483 { }
484
485 explicit stop_source(std::nostopstate_t) noexcept
486 { }
487
488 stop_source(const stop_source& __other) noexcept
489 : _M_state(__other._M_state)
490 {
491 if (_M_state)
492 _M_state->_M_add_ssrc();
493 }
494
495 stop_source(stop_source&&) noexcept = default;
496
497 stop_source&
498 operator=(const stop_source& __other) noexcept
499 {
500 if (_M_state != __other._M_state)
501 {
502 stop_source __sink(std::move(*this));
503 _M_state = __other._M_state;
504 if (_M_state)
505 _M_state->_M_add_ssrc();
506 }
507 return *this;
508 }
509
510 stop_source&
511 operator=(stop_source&&) noexcept = default;
512
513 ~stop_source()
514 {
515 if (_M_state)
516 _M_state->_M_sub_ssrc();
517 }
518
519 [[nodiscard]]
520 bool
521 stop_possible() const noexcept
522 {
523 return static_cast<bool>(_M_state);
524 }
525
526 [[nodiscard]]
527 bool
528 stop_requested() const noexcept
529 {
530 return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
531 }
532
533 bool
534 request_stop() const noexcept
535 {
536 if (stop_possible())
537 return _M_state->_M_request_stop();
538 return false;
539 }
540
541 [[nodiscard]]
542 stop_token
543 get_token() const noexcept
544 {
545 return stop_token{_M_state};
546 }
547
548 void
549 swap(stop_source& __other) noexcept
550 {
551 _M_state.swap(__other._M_state);
552 }
553
554 [[nodiscard]]
555 friend bool
556 operator==(const stop_source& __a, const stop_source& __b) noexcept
557 {
558 return __a._M_state == __b._M_state;
559 }
560
561 friend void
562 swap(stop_source& __lhs, stop_source& __rhs) noexcept
563 {
564 __lhs.swap(__rhs);
565 }
566
567 private:
568 stop_token::_Stop_state_ref _M_state;
569 };
570
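Tying the two classes together: a stop_source hands out associated tokens via get_token() and later calls request_stop(), which every such token observes. A minimal sketch, reusing the illustrative process_work function from above:

  #include <stop_token>
  #include <thread>

  int main()
  {
    std::stop_source src;
    std::thread worker(process_work, src.get_token());

    // ... later, from any thread that still holds the stop_source ...
    src.request_stop(); // true only for the first successful request

    worker.join();
  }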
571 /// A wrapper for callbacks to be run when a stop request is made.
572 template<typename _Callback>
573 class [[nodiscard]] stop_callback
574 {
575 static_assert(is_nothrow_destructible_v<_Callback>);
576 static_assert(is_invocable_v<_Callback>);
577
578 public:
579 using callback_type = _Callback;
580
581 template<typename _Cb,
582 typename = enable_if_t<is_constructible_v<_Callback, _Cb>>>
583 explicit
584 stop_callback(const stop_token& __token, _Cb&& __cb)
585 noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
586 : _M_cb(std::forward<_Cb>(__cb))
587 {
588 if (auto __state = __token._M_state)
589 {
590 if (__state->_M_register_callback(&_M_cb))
591 _M_state.swap(__state);
592 }
593 }
594
595 template<typename _Cb,
596 typename = enable_if_t<is_constructible_v<_Callback, _Cb>>>
597 explicit
598 stop_callback(stop_token&& __token, _Cb&& __cb)
599 noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
600 : _M_cb(std::forward<_Cb>(__cb))
601 {
602 if (auto& __state = __token._M_state)
603 {
604 if (__state->_M_register_callback(&_M_cb))
605 _M_state.swap(__state);
606 }
607 }
608
609 ~stop_callback()
610 {
611 if (_M_state)
612 {
613 _M_state->_M_remove_callback(&_M_cb);
614 }
615 }
616
617 stop_callback(const stop_callback&) = delete;
618 stop_callback& operator=(const stop_callback&) = delete;
619 stop_callback(stop_callback&&) = delete;
620 stop_callback& operator=(stop_callback&&) = delete;
621
622 private:
623 struct _Cb_impl : stop_token::_Stop_cb
624 {
625 template<typename _Cb>
626 explicit
627 _Cb_impl(_Cb&& __cb)
628 : _Stop_cb(&_S_execute),
629 _M_cb(std::forward<_Cb>(__cb))
630 { }
631
632 _Callback _M_cb;
633
634 [[__gnu__::__nonnull__]]
635 static void
636 _S_execute(_Stop_cb* __that) noexcept
637 {
638 _Callback& __cb = static_cast<_Cb_impl*>(__that)->_M_cb;
639 std::forward<_Callback>(__cb)();
640 }
641 };
642
643 _Cb_impl _M_cb;
644 stop_token::_Stop_state_ref _M_state;
645 };
646
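A stop_callback registers its callable with the token's shared state on construction, runs it exactly once when a stop is requested (or immediately, on the constructing thread, if the request was already made), and deregisters it on destruction. One common use is waking a condition_variable; a minimal sketch with illustrative names:

  #include <stop_token>
  #include <condition_variable>
  #include <mutex>

  void wait_until_stopped(std::stop_token tok)
  {
    std::mutex m;
    std::condition_variable cv;

    // Notify under the mutex so a wake-up cannot be lost between the
    // waiter's predicate check and its call to wait().
    std::stop_callback cb(tok, [&m, &cv]
    {
      std::lock_guard<std::mutex> g(m);
      cv.notify_all();
    });

    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [&tok] { return tok.stop_requested(); });
  }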
647 template<typename _Callback>
648 stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;
649
650_GLIBCXX_END_NAMESPACE_VERSION
651} // namespace
652#endif // __cplusplus > 201703L
653#endif // _GLIBCXX_STOP_TOKEN
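Finally, the __cpp_lib_jthread macro defined above advertises std::jthread (provided by <thread>), which owns a stop_source, passes a stop_token to callables that accept one, and requests a stop before joining in its destructor. A minimal sketch of that interaction:

  #include <thread>  // std::jthread
  #include <chrono>

  int main()
  {
    std::jthread worker([](std::stop_token tok)
    {
      while (!tok.stop_requested())
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    });
    // ~jthread() calls request_stop() and then join(), so the loop exits.
  }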