libstdc++
shared_mutex
Go to the documentation of this file.
1 // <shared_mutex> -*- C++ -*-
2 
3 // Copyright (C) 2013-2023 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file include/shared_mutex
26  * This is a Standard C++ Library header.
27  */
28 
29 #ifndef _GLIBCXX_SHARED_MUTEX
30 #define _GLIBCXX_SHARED_MUTEX 1
31 
32 #pragma GCC system_header
33 
34 #include <bits/requires_hosted.h> // concurrency
35 
36 #if __cplusplus >= 201402L
37 
38 #include <bits/chrono.h>
39 #include <bits/error_constants.h>
40 #include <bits/functexcept.h>
41 #include <bits/move.h> // move, __exchange
42 #include <bits/std_mutex.h> // defer_lock_t
43 
44 #if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
45 # include <condition_variable>
46 #endif
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 
52  /**
53  * @addtogroup mutexes
54  * @{
55  */
56 
57 #ifdef _GLIBCXX_HAS_GTHREADS
58 
59 #if __cplusplus >= 201703L
60 #define __cpp_lib_shared_mutex 201505L
61  class shared_mutex;
62 #endif
63 
64 #define __cpp_lib_shared_timed_mutex 201402L
65  class shared_timed_mutex;
66 
67  /// @cond undocumented
68 
69 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
  // Weak-reference wrappers for the pthread_rwlock_t functions.  Each
  // __glibcxx_* wrapper forwards to the real pthread function only when
  // the program is actually multi-threaded (__gthread_active_p ());
  // otherwise it is a no-op that reports success, so single-threaded
  // programs pay nothing and need not link libpthread.
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  // No static initializer exists, so the rwlock must be initialized and
  // destroyed explicitly; wrap init/destroy too.  pthread_rwlock_init
  // takes an extra attributes argument, so it cannot use the macro above.
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  // Timed-wait wrappers, only provided when the gthreads layer supports
  // timed locking.  These take an absolute timespec deadline.
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  // __gthrw weak references are unavailable: call the pthread functions
  // directly (the program is known to link against the threads library).
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif
153 
  /// A shared mutex type implemented using pthread_rwlock_t.
  // Exclusive ownership maps to the POSIX write lock, shared ownership
  // to the read lock.  POSIX error codes that correspond to standard
  // error conditions are translated into system_error exceptions; the
  // remainder indicate precondition violations and are only checked by
  // __glibcxx_assert in debug builds.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // A static initializer exists, so no explicit init/destroy calls
    // are needed and the special members can be trivial.
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    // Initialize the rwlock, translating POSIX errors into the
    // corresponding standard exceptions.
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    // Acquire the write lock, blocking until it is available.
    // Throws on detected self-deadlock.
    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Try to acquire the write lock without blocking.
    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock. Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    // POSIX uses a single pthread_rwlock_unlock for both lock kinds.
    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
261 #endif
262 
263 #if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    // Mask for the writer flag: the most significant bit of unsigned.
    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    // All remaining bits form the reader count, so this is also the
    // largest representable number of concurrent readers.
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      // Destroying a mutex that is still locked is undefined behaviour.
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    // Succeeds only when the state mutex is uncontended and no reader
    // or writer currently holds the lock.
    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Blocks both while a writer is queued and while the reader count
      // is saturated (_M_state == _S_max_readers).
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
407 #endif
408  /// @endcond
409 
410 #if __cplusplus >= 201703L
411  /// The standard shared mutex type.
412  class shared_mutex
413  {
414  public:
415  shared_mutex() = default;
416  ~shared_mutex() = default;
417 
418  shared_mutex(const shared_mutex&) = delete;
419  shared_mutex& operator=(const shared_mutex&) = delete;
420 
421  // Exclusive ownership
422 
423  void lock() { _M_impl.lock(); }
424  [[nodiscard]] bool try_lock() { return _M_impl.try_lock(); }
425  void unlock() { _M_impl.unlock(); }
426 
427  // Shared ownership
428 
429  void lock_shared() { _M_impl.lock_shared(); }
430  [[nodiscard]] bool try_lock_shared() { return _M_impl.try_lock_shared(); }
431  void unlock_shared() { _M_impl.unlock_shared(); }
432 
433 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
434  typedef void* native_handle_type;
435  native_handle_type native_handle() { return _M_impl.native_handle(); }
436 
437  private:
438  __shared_mutex_pthread _M_impl;
439 #else
440  private:
441  __shared_mutex_cv _M_impl;
442 #endif
443  };
444 #endif // C++17
445 
446  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  // The pthread implementation can only back shared_timed_mutex when the
  // timed rwlock wrappers exist, i.e. when gthreads has timed locking.
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
452  /// @endcond
453 
  /// The standard shared timed mutex type.
  // Inherits the untimed operations from the selected base and adds the
  // try_lock_for/until and try_lock_shared_for/until families.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // pthread_rwlock_clock*lock is available, so deadlines can be
    // measured against the monotonic clock.
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    _GLIBCXX_NODISCARD bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    // Try to obtain exclusive ownership within a relative timeout,
    // by converting it to an absolute deadline on __clock_t.
    template<typename _Rep, typename _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	// Round up so a truncating cast cannot shorten the caller's timeout.
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    _GLIBCXX_NODISCARD
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    // Try to obtain shared ownership within a relative timeout.
    template<typename _Rep, typename _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	// Round up so a truncating cast cannot shorten the caller's timeout.
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    // system_clock overload: pthread_rwlock_timedwrlock interprets the
    // timespec against CLOCK_REALTIME, which matches system_clock.
    template<typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
		     _Duration>& __atime)
      {
	// Split the absolute time into whole seconds + nanoseconds
	// to build the timespec deadline.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // steady_clock overload: uses pthread_rwlock_clockwrlock with
    // CLOCK_MONOTONIC so the wait is immune to wall-clock adjustments.
    template<typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
		     _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    // Generic overload for any user-supplied clock.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

    // Shared ownership

    // system_clock overload, see try_lock_until above for the timespec
    // conversion.
    template<typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // steady_clock overload using CLOCK_MONOTONIC.
    template<typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    // Generic overload for any user-supplied clock.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_shared_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    // Timed variant of __shared_mutex_cv::lock(): both gate waits get
    // the same absolute deadline.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    // Timed out waiting for the readers to drain: withdraw the
	    // write-entered flag we set above.
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    // Timed variant of __shared_mutex_cv::lock_shared().
    template <typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
725 #endif // _GLIBCXX_HAS_GTHREADS
726 
727  /// shared_lock
728  template<typename _Mutex>
729  class shared_lock
730  {
731  public:
732  typedef _Mutex mutex_type;
733 
734  // Shared locking
735 
736  shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
737 
738  explicit
739  shared_lock(mutex_type& __m)
740  : _M_pm(std::__addressof(__m)), _M_owns(true)
741  { __m.lock_shared(); }
742 
743  shared_lock(mutex_type& __m, defer_lock_t) noexcept
744  : _M_pm(std::__addressof(__m)), _M_owns(false) { }
745 
746  shared_lock(mutex_type& __m, try_to_lock_t)
747  : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
748 
749  shared_lock(mutex_type& __m, adopt_lock_t)
750  : _M_pm(std::__addressof(__m)), _M_owns(true) { }
751 
752  template<typename _Clock, typename _Duration>
753  shared_lock(mutex_type& __m,
754  const chrono::time_point<_Clock, _Duration>& __abs_time)
755  : _M_pm(std::__addressof(__m)),
756  _M_owns(__m.try_lock_shared_until(__abs_time)) { }
757 
758  template<typename _Rep, typename _Period>
759  shared_lock(mutex_type& __m,
760  const chrono::duration<_Rep, _Period>& __rel_time)
761  : _M_pm(std::__addressof(__m)),
762  _M_owns(__m.try_lock_shared_for(__rel_time)) { }
763 
764  ~shared_lock()
765  {
766  if (_M_owns)
767  _M_pm->unlock_shared();
768  }
769 
770  shared_lock(shared_lock const&) = delete;
771  shared_lock& operator=(shared_lock const&) = delete;
772 
773  shared_lock(shared_lock&& __sl) noexcept : shared_lock()
774  { swap(__sl); }
775 
776  shared_lock&
777  operator=(shared_lock&& __sl) noexcept
778  {
779  shared_lock(std::move(__sl)).swap(*this);
780  return *this;
781  }
782 
783  void
784  lock()
785  {
786  _M_lockable();
787  _M_pm->lock_shared();
788  _M_owns = true;
789  }
790 
791  _GLIBCXX_NODISCARD
792  bool
793  try_lock()
794  {
795  _M_lockable();
796  return _M_owns = _M_pm->try_lock_shared();
797  }
798 
799  template<typename _Rep, typename _Period>
800  _GLIBCXX_NODISCARD
801  bool
802  try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
803  {
804  _M_lockable();
805  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
806  }
807 
808  template<typename _Clock, typename _Duration>
809  _GLIBCXX_NODISCARD
810  bool
811  try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
812  {
813  _M_lockable();
814  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
815  }
816 
817  void
818  unlock()
819  {
820  if (!_M_owns)
821  __throw_system_error(int(errc::operation_not_permitted));
822  _M_pm->unlock_shared();
823  _M_owns = false;
824  }
825 
826  // Setters
827 
828  void
829  swap(shared_lock& __u) noexcept
830  {
831  std::swap(_M_pm, __u._M_pm);
832  std::swap(_M_owns, __u._M_owns);
833  }
834 
835  mutex_type*
836  release() noexcept
837  {
838  _M_owns = false;
839  return std::__exchange(_M_pm, nullptr);
840  }
841 
842  // Getters
843 
844  _GLIBCXX_NODISCARD
845  bool owns_lock() const noexcept { return _M_owns; }
846 
847  explicit operator bool() const noexcept { return _M_owns; }
848 
849  _GLIBCXX_NODISCARD
850  mutex_type* mutex() const noexcept { return _M_pm; }
851 
852  private:
853  void
854  _M_lockable() const
855  {
856  if (_M_pm == nullptr)
857  __throw_system_error(int(errc::operation_not_permitted));
858  if (_M_owns)
859  __throw_system_error(int(errc::resource_deadlock_would_occur));
860  }
861 
862  mutex_type* _M_pm;
863  bool _M_owns;
864  };
865 
  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  // Enables ADL and std::swap to find an efficient pointer/flag swap;
  // noexcept because the member swap never throws.
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
872 
873  /// @} group mutexes
874 _GLIBCXX_END_NAMESPACE_VERSION
875 } // namespace
876 
877 #endif // C++14
878 
879 #endif // _GLIBCXX_SHARED_MUTEX