// <mutex> -*- C++ -*-

// Copyright (C) 2003-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#include <bits/requires_hosted.h> // concurrency

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <exception>
#include <type_traits>
#include <bits/chrono.h>
#include <bits/error_constants.h>
#include <bits/std_mutex.h>
#include <bits/unique_lock.h>
#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
# include <condition_variable>
# include <thread>
#endif
#include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded

#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
# include <bits/std_function.h> // std::function
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS
  /// @cond undocumented

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
  /// @endcond

  /** The standard recursive mutex type.
   *
   * A recursive mutex can be locked more than once by the same thread.
   * Other threads cannot lock the mutex until the owning thread unlocks it
   * as many times as it was locked.
   *
   * @headerfile mutex
   * @since C++11
   */
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };

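  /* Usage sketch (an illustrative addition, not part of the upstream
     header): a recursive_mutex lets the same thread re-enter a locked
     region, e.g. when one locking function calls another.  All names
     below are hypothetical.

       #include <mutex>

       std::recursive_mutex m;
       int counter = 0;

       void add(int n)
       {
         std::lock_guard<std::recursive_mutex> lock(m);
         counter += n;
       }

       void add_twice(int n)
       {
         std::lock_guard<std::recursive_mutex> lock(m); // m locked again in add()
         add(n);
         add(n);
       }
  */
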
#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  /// @cond undocumented

  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      template<typename _Rep, typename _Period>
        bool
        _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
          using __clock = chrono::steady_clock;
#else
          using __clock = chrono::system_clock;
#endif

          auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
          if (ratio_greater<__clock::period, _Period>())
            ++__rt;
          return _M_try_lock_until(__clock::now() + __rt);
        }

      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::system_clock,
                          _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_timedlock(__ts);
        }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
                          _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
                                                            __ts);
        }
#endif

      template<typename _Clock, typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
#if __cplusplus > 201703L
          static_assert(chrono::is_clock_v<_Clock>);
#endif
          // The user-supplied clock may not tick at the same rate as
          // steady_clock, so we must loop in order to guarantee that
          // the timeout has expired before returning false.
          auto __now = _Clock::now();
          do {
            auto __rtime = __atime - __now;
            if (_M_try_lock_for(__rtime))
              return true;
            __now = _Clock::now();
          } while (__atime > __now);
          return false;
        }
    };
  /// @endcond

  /** The standard timed mutex type.
   *
   * A non-recursive mutex that supports a timeout when trying to acquire the
   * lock.
   *
   * @headerfile mutex
   * @since C++11
   */
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };

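  /* Usage sketch (an illustrative addition, not part of the upstream
     header): try to acquire a timed_mutex for a bounded time and give up
     rather than blocking indefinitely.  All names below are hypothetical.

       #include <chrono>
       #include <iostream>
       #include <mutex>

       std::timed_mutex mtx;

       void worker()
       {
         using namespace std::chrono_literals;
         if (mtx.try_lock_for(100ms))   // wait at most 100 milliseconds
           {
             // ... critical section ...
             mtx.unlock();
           }
         else
           std::cout << "timed out waiting for the lock\n";
       }
  */
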
  /** The standard recursive timed mutex type.
   *
   * A recursive mutex that supports a timeout when trying to acquire the
   * lock. A recursive mutex can be locked more than once by the same thread.
   * Other threads cannot lock the mutex until the owning thread unlocks it
   * as many times as it was locked.
   *
   * @headerfile mutex
   * @since C++11
   */
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };

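  /* Usage sketch (an illustrative addition, not part of the upstream
     header): try_lock_until takes an absolute deadline, which is convenient
     when one deadline bounds several lock attempts.  Names are hypothetical.

       #include <chrono>
       #include <mutex>

       std::recursive_timed_mutex rtm;

       bool locked_before(std::chrono::steady_clock::time_point deadline)
       {
         if (!rtm.try_lock_until(deadline))   // give up at the deadline
           return false;
         // ... owns the lock here, and may lock rtm again recursively ...
         rtm.unlock();
         return true;
       }
  */
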
#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK

  /// timed_mutex
  class timed_mutex
  {
    mutex _M_mut;
    condition_variable _M_cv;
    bool _M_locked = false;

  public:

    timed_mutex() = default;
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
        return false;
      _M_locked = true;
      return true;
    }

    template<typename _Rep, typename _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    mutex _M_mut;
    condition_variable _M_cv;
    thread::id _M_owner;
    unsigned _M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      if (_M_count == -1u)
        __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
        return false;
      if (_M_count == -1u)
        return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
        {
          _M_owner = {};
          _M_cv.notify_one();
        }
    }
  };

#endif
#endif // _GLIBCXX_HAS_GTHREADS

  /// @cond undocumented
  namespace __detail
  {
    // Lock the last lockable, after all previous ones are locked.
    template<typename _Lockable>
      inline int
      __try_lock_impl(_Lockable& __l)
      {
        if (unique_lock<_Lockable> __lock{__l, try_to_lock})
          {
            __lock.release();
            return -1;
          }
        else
          return 0;
      }

    // Lock each lockable in turn.
    // Use iteration if all lockables are the same type, recursion otherwise.
    template<typename _L0, typename... _Lockables>
      inline int
      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
      {
#if __cplusplus >= 201703L
        if constexpr ((is_same_v<_L0, _Lockables> && ...))
          {
            constexpr int _Np = 1 + sizeof...(_Lockables);
            unique_lock<_L0> __locks[_Np] = {
              {__l0, defer_lock}, {__lockables, defer_lock}...
            };
            for (int __i = 0; __i < _Np; ++__i)
              {
                if (!__locks[__i].try_lock())
                  {
                    const int __failed = __i;
                    while (__i--)
                      __locks[__i].unlock();
                    return __failed;
                  }
              }
            for (auto& __l : __locks)
              __l.release();
            return -1;
          }
        else
#endif
        if (unique_lock<_L0> __lock{__l0, try_to_lock})
          {
            int __idx = __detail::__try_lock_impl(__lockables...);
            if (__idx == -1)
              {
                __lock.release();
                return -1;
              }
            return __idx + 1;
          }
        else
          return 0;
      }

  } // namespace __detail
  /// @endcond

  /** @brief Generic try_lock.
   * @param __l1 Meets Lockable requirements (try_lock() may throw).
   * @param __l2 Meets Lockable requirements (try_lock() may throw).
   * @param __l3 Meets Lockable requirements (try_lock() may throw).
   * @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *         a 0-based index corresponding to the argument that returned false.
   * @post Either all arguments are locked, or none will be.
   *
   * Sequentially calls try_lock() on each argument.
   */
  template<typename _L1, typename _L2, typename... _L3>
    _GLIBCXX_NODISCARD
    inline int
    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      return __detail::__try_lock_impl(__l1, __l2, __l3...);
    }

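  /* Usage sketch (an illustrative addition, not part of the upstream
     header): std::try_lock attempts each lockable in order and reports
     which one failed, releasing anything it had already locked.  Names
     are hypothetical.

       #include <mutex>

       std::mutex a, b;

       bool try_both()
       {
         int failed = std::try_lock(a, b);  // -1 on success, else 0-based index
         if (failed == -1)
           {
             // Both locked: adopt them so they are released automatically.
             std::lock_guard<std::mutex> la(a, std::adopt_lock);
             std::lock_guard<std::mutex> lb(b, std::adopt_lock);
             // ... critical section ...
             return true;
           }
         return false;  // neither a nor b is held here
       }
  */
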
  /// @cond undocumented
  namespace __detail
  {
    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
    // On each recursion the lockables are rotated left one position,
    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
    // so that l_i is the first argument, and then blocks until l_i is locked.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
        while (__i >= __depth)
          {
            if (__i == __depth)
              {
                int __failed = 1; // index that couldn't be locked
                {
                  unique_lock<_L0> __first(__l0);
                  __failed += __detail::__try_lock_impl(__l1...);
                  if (!__failed)
                    {
                      __i = -1; // finished
                      __first.release();
                      return;
                    }
                }
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
                __gthread_yield();
#endif
                constexpr auto __n = 1 + sizeof...(_L1);
                __i = (__depth + __failed) % __n;
              }
            else // rotate left until l_i is first.
              __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
          }
      }

  } // namespace __detail
  /// @endcond

  /** @brief Generic lock.
   * @param __l1 Meets Lockable requirements (try_lock() may throw).
   * @param __l2 Meets Lockable requirements (try_lock() may throw).
   * @param __l3 Meets Lockable requirements (try_lock() may throw).
   * @throw An exception thrown by an argument's lock() or try_lock() member.
   * @post All arguments are locked.
   *
   * All arguments are locked via a sequence of calls to lock(), try_lock()
   * and unlock(). If this function exits via an exception any locks that
   * were obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
        {
          constexpr int _Np = 2 + sizeof...(_L3);
          unique_lock<_L1> __locks[] = {
            {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
          };
          int __first = 0;
          do {
            __locks[__first].lock();
            for (int __j = 1; __j < _Np; ++__j)
              {
                const int __idx = (__first + __j) % _Np;
                if (!__locks[__idx].try_lock())
                  {
                    for (int __k = __j; __k != 0; --__k)
                      __locks[(__first + __k - 1) % _Np].unlock();
                    __first = __idx;
                    break;
                  }
              }
          } while (!__locks[__first].owns_lock());

          for (auto& __l : __locks)
            __l.release();
        }
      else
#endif
        {
          int __i = 0;
          __detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
        }
    }

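  /* Usage sketch (an illustrative addition, not part of the upstream
     header): std::lock acquires several mutexes without deadlocking even
     when two threads pass them in different orders.  Names are hypothetical.

       #include <mutex>

       struct account { std::mutex m; long balance = 0; };

       void transfer(account& from, account& to, long amount)
       {
         std::lock(from.m, to.m);              // deadlock-free acquisition
         std::lock_guard<std::mutex> lf(from.m, std::adopt_lock);
         std::lock_guard<std::mutex> lt(to.m, std::adopt_lock);
         from.balance -= amount;
         to.balance += amount;
       }
  */
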
#if __cplusplus >= 201703L
#define __cpp_lib_scoped_lock 201703L
  /** @brief A scoped lock type for multiple lockable objects.
   *
   * A scoped_lock controls mutex ownership within a scope, releasing
   * ownership in the destructor.
   *
   * @headerfile mutex
   * @since C++17
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      ~scoped_lock()
      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      tuple<_MutexTypes&...> _M_devices;
    };

  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };

  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type& _M_device;
    };
#endif // C++17

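  /* Usage sketch (an illustrative addition, not part of the upstream
     header, C++17): scoped_lock combines std::lock with automatic
     unlocking, so a multi-mutex critical section needs only a single
     declaration.  Names are hypothetical.

       #include <mutex>

       struct account { std::mutex m; long balance = 0; };

       void transfer(account& from, account& to, long amount)
       {
         std::scoped_lock lock(from.m, to.m);  // locks both, unlocks in dtor
         from.balance -= amount;
         to.balance += amount;
       }
  */
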
#ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    struct _Prepare_execution;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  /// @cond undocumented
# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store address in thread-local pointer:
        __once_callable = std::__addressof(__c);
        // Trampoline function to invoke the closure via thread-local pointer:
        __once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };

# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store the callable in the global std::function
        __once_functor = __c;
        __set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      if (_M_functor_lock)
        __set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# endif
  /// @endcond

  // This function is passed to pthread_once by std::call_once.
  // It runs __once_call() or __once_functor().
  extern "C" void __once_proxy(void);

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
        std::__invoke(std::forward<_Callable>(__f),
                      std::forward<_Args>(__args)...);
      };

      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
        __throw_system_error(__e);
    }

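  /* Usage sketch (an illustrative addition, not part of the upstream
     header): call_once runs the callable exactly once even if several
     threads race on the same once_flag; if the callable throws, a later
     caller runs it again.  Names are hypothetical.

       #include <mutex>

       std::once_flag init_flag;
       int* shared_table = nullptr;

       void init_table()
       { shared_table = new int[256](); }   // zero-initialized table

       void use_table(int i)
       {
         std::call_once(init_flag, init_table); // at most one thread runs it
         ++shared_table[i];
       }
  */
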
#else // _GLIBCXX_HAS_GTHREADS

  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      bool _M_returning = false;
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  // Inline definitions of std::once_flag members for single-threaded targets.

  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }

  inline bool
  once_flag::_M_activate()
  {
    if (_M_once == _Bits::_Init) [[__likely__]]
      {
        _M_once = _Bits::_Active;
        return true;
      }
    else if (_M_passive()) // Caller should have checked this already.
      return false;
    else
      __throw_system_error(EDEADLK);
  }

  inline void
  once_flag::_M_finish(bool __returning) noexcept
  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    inline void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      if (__once._M_passive())
        return;
      else if (__once._M_activate())
        {
          once_flag::_Active_execution __exec(__once);

          // _GLIBCXX_RESOLVE_LIB_DEFECTS
          // 2442. call_once() shouldn't DECAY_COPY()
          std::__invoke(std::forward<_Callable>(__f),
                        std::forward<_Args>(__args)...);

          // __f(__args...) did not throw
          __exec._M_returning = true;
        }
    }
#endif // _GLIBCXX_HAS_GTHREADS

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11

#endif // _GLIBCXX_MUTEX