libstdc++
shared_mutex
// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
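  // For example, _GLIBCXX_GTHRW(rwlock_rdlock) defines a wrapper
  // __glibcxx_rwlock_rdlock that forwards to pthread_rwlock_rdlock through
  // the __gthrw weak-reference machinery when the program is actually
  // multi-threaded, and is a no-op returning 0 otherwise.  Expanded
  // (approximately), it reads:
  //
  //   __gthrw(pthread_rwlock_rdlock);
  //   static inline int
  //   __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  //   {
  //     if (__gthread_active_p ())
  //       return __gthrw_(pthread_rwlock_rdlock) (__rwlock);
  //     else
  //       return 0;
  //   }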
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority.  When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

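    // Illustrative state values (editorial example, assuming a 32-bit
    // unsigned _M_state):
    //   0x00000000  unlocked
    //   0x00000003  three readers hold the lock
    //   0x80000000  a writer holds the lock (or is queued) with no readers
    //   0x80000002  a writer is queued; two readers must release first
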
    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
#endif
  /// @endcond

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class
  _GLIBCXX_THREAD_SAFETY_ANNOTATION(capability("shared_mutex")) shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() _GLIBCXX_THREAD_SAFETY_ANNOTATION(acquire_capability())
    { _M_impl.lock(); }

    bool try_lock()
    _GLIBCXX_THREAD_SAFETY_ANNOTATION(try_acquire_capability(true))
    { return _M_impl.try_lock(); }

    void unlock() _GLIBCXX_THREAD_SAFETY_ANNOTATION(release_capability())
    { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared()
    _GLIBCXX_THREAD_SAFETY_ANNOTATION(acquire_shared_capability())
    { _M_impl.lock_shared(); }

    bool try_lock_shared()
    _GLIBCXX_THREAD_SAFETY_ANNOTATION(try_acquire_shared_capability(true))
    { return _M_impl.try_lock_shared(); }

    void unlock_shared()
    _GLIBCXX_THREAD_SAFETY_ANNOTATION(release_shared_capability())
    { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
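
  // Editorial example (not part of the header): a typical reader/writer
  // use of std::shared_mutex.  The class Counters and its members are
  // hypothetical.
  //
  //   #include <mutex>
  //   #include <shared_mutex>
  //   #include <string>
  //   #include <unordered_map>
  //
  //   class Counters
  //   {
  //     mutable std::shared_mutex mtx;
  //     std::unordered_map<std::string, long> map;
  //
  //   public:
  //     long
  //     get(const std::string& key) const
  //     {
  //       std::shared_lock<std::shared_mutex> lk(mtx); // many readers
  //       auto it = map.find(key);
  //       return it == map.end() ? 0 : it->second;
  //     }
  //
  //     void
  //     bump(const std::string& key)
  //     {
  //       std::lock_guard<std::shared_mutex> lk(mtx);  // single writer
  //       ++map[key];
  //     }
  //   };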

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
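	// duration_cast truncates toward zero; if __clock_t ticks more
	// coarsely than _Period, add one tick so we do not wake up early.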
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
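	// Round up exactly as in try_lock_for above.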
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
		     _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
		     _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_shared_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
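
  // Editorial example (not part of the header): bounded waiting with
  // shared_timed_mutex.  The mutex name and the 100ms budget are
  // hypothetical.
  //
  //   #include <chrono>
  //   #include <shared_mutex>
  //
  //   std::shared_timed_mutex config_mtx;
  //
  //   bool try_refresh_config()
  //   {
  //     using namespace std::chrono_literals;
  //     if (!config_mtx.try_lock_for(100ms)) // give up instead of stalling
  //       return false;
  //     // ... update the shared configuration with exclusive access ...
  //     config_mtx.unlock();
  //     return true;
  //   }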

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool _M_owns;
    };
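
  // Editorial example (not part of the header): shared_lock is the RAII
  // counterpart of lock_shared()/unlock_shared() and is movable, so a
  // held reader lock can be handed back to a caller.  The names below
  // are hypothetical.
  //
  //   #include <shared_mutex>
  //
  //   std::shared_timed_mutex stats_mtx;
  //
  //   std::shared_lock<std::shared_timed_mutex> borrow_stats_lock()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(stats_mtx);
  //     return lk; // ownership moves to the caller
  //   }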

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX