libstdc++: bits/atomic_wait.h
// -*- C++ -*- header.

// Copyright (C) 2020-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_wait.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_WAIT_H
#define _GLIBCXX_ATOMIC_WAIT_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
#include <bits/functional_hash.h>
#include <bits/gthr.h>
#include <ext/numeric_traits.h>

#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
# include <cerrno>
# include <climits>
# include <unistd.h>
# include <syscall.h>
# include <bits/functexcept.h>
#endif

# include <bits/std_mutex.h>  // std::mutex, std::__condvar

#define __cpp_lib_atomic_wait 201907L
52 
53 namespace std _GLIBCXX_VISIBILITY(default)
54 {
55 _GLIBCXX_BEGIN_NAMESPACE_VERSION
56  namespace __detail
57  {
58 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
59  using __platform_wait_t = int;
60  static constexpr size_t __platform_wait_alignment = 4;
61 #else
62  using __platform_wait_t = uint64_t;
63  static constexpr size_t __platform_wait_alignment
64  = __alignof__(__platform_wait_t);
65 #endif
66  } // namespace __detail
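
  // True if objects of type _Tp can be waited on directly by the platform
  // primitive (the futex syscall on Linux): _Tp must be a scalar type with
  // the same size and at least the alignment of __platform_wait_t.  All
  // other types are waited on through a proxy __platform_wait_t (the
  // pool's _M_ver counter below) instead.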
  template<typename _Tp>
    inline constexpr bool __platform_wait_uses_type
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      = is_scalar_v<_Tp>
	&& ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
	&& (alignof(_Tp) >= __detail::__platform_wait_alignment));
#else
      = false;
#endif

  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
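    // Futex operation codes and flags, matching the kernel's FUTEX_*
    // constants (see futex(2)).  The private flag restricts an operation
    // to waiters in the same process, which is cheaper for the kernel.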
    enum class __futex_wait_flags : int
    {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
      __private_flag = 128,
#else
      __private_flag = 0,
#endif
      __wait = 0,
      __wake = 1,
      __wait_bitset = 9,
      __wake_bitset = 10,
      __wait_private = __wait | __private_flag,
      __wake_private = __wake | __private_flag,
      __wait_bitset_private = __wait_bitset | __private_flag,
      __wake_bitset_private = __wake_bitset | __private_flag,
      __bitset_match_any = -1
    };
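
    // Block until the int at __addr no longer holds __val, via
    // FUTEX_WAIT_PRIVATE.  Returns normally on success, on EAGAIN (the
    // value had already changed), or on EINTR (callers re-check their
    // predicate); any other error is reported via __throw_system_error.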
    template<typename _Tp>
      void
      __platform_wait(const _Tp* __addr, __platform_wait_t __val) noexcept
      {
	auto __e = syscall (SYS_futex, static_cast<const void*>(__addr),
			    static_cast<int>(__futex_wait_flags::__wait_private),
			    __val, nullptr);
	if (!__e || errno == EAGAIN)
	  return;
	if (errno != EINTR)
	  __throw_system_error(errno);
      }
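
    // Wake one waiter (or all waiters, if __all is true) blocked in
    // __platform_wait on __addr, via FUTEX_WAKE_PRIVATE.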
    template<typename _Tp>
      void
      __platform_notify(const _Tp* __addr, bool __all) noexcept
      {
	syscall (SYS_futex, static_cast<const void*>(__addr),
		 static_cast<int>(__futex_wait_flags::__wake_private),
		 __all ? INT_MAX : 1);
      }
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
#endif

    inline void
    __thread_yield() noexcept
    {
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
      __gthread_yield();
#endif
    }

    inline void
    __thread_relax() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#else
      __thread_yield();
#endif
    }
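
    // Before blocking in the kernel, poll the predicate: first a short
    // busy-wait loop using a pause instruction, then a few iterations
    // that yield the processor, then whatever additional spinning the
    // supplied policy allows.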
    constexpr auto __atomic_spin_count_1 = 12;
    constexpr auto __atomic_spin_count_2 = 4;

    struct __default_spin_policy
    {
      bool
      operator()() const noexcept
      { return false; }
    };

    template<typename _Pred,
	     typename _Spin = __default_spin_policy>
      bool
      __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
      {
	for (auto __i = 0; __i < __atomic_spin_count_1; ++__i)
	  {
	    if (__pred())
	      return true;
	    __detail::__thread_relax();
	  }

	for (auto __i = 0; __i < __atomic_spin_count_2; ++__i)
	  {
	    if (__pred())
	      return true;
	    __detail::__thread_yield();
	  }

	while (__spin())
	  {
	    if (__pred())
	      return true;
	  }

	return false;
      }

    // Returns true if the two objects have different object representations.
    template<typename _Tp>
      bool __atomic_compare(const _Tp& __a, const _Tp& __b)
      {
	// TODO make this do the correct padding bit ignoring comparison
	return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) != 0;
      }
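
    // Per-address waiter state: a count of threads currently waiting, a
    // version counter that serves as the proxy wait variable, and (when
    // no platform primitive is available) the mutex/condvar used to block.
    // The two counters are aligned to separate cache lines to avoid false
    // sharing.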
    struct __waiter_pool_base
    {
#ifdef __cpp_lib_hardware_interference_size
      static constexpr auto _S_align = hardware_destructive_interference_size;
#else
      static constexpr auto _S_align = 64;
#endif

      alignas(_S_align) __platform_wait_t _M_wait = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      mutex _M_mtx;
#endif

      alignas(_S_align) __platform_wait_t _M_ver = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      __condvar _M_cv;
#endif
      __waiter_pool_base() = default;

      void
      _M_enter_wait() noexcept
      { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_ACQ_REL); }

      void
      _M_leave_wait() noexcept
      { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_ACQ_REL); }

      bool
      _M_waiting() const noexcept
      {
	__platform_wait_t __res;
	__atomic_load(&_M_wait, &__res, __ATOMIC_ACQUIRE);
	return __res > 0;
      }

      void
      _M_notify(const __platform_wait_t* __addr, bool __all, bool __bare) noexcept
      {
	if (!(__bare || _M_waiting()))
	  return;

#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	__platform_notify(__addr, __all);
#else
	if (__all)
	  _M_cv.notify_all();
	else
	  _M_cv.notify_one();
#endif
      }
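
      // Map an arbitrary address to one of a fixed set of 16 statically
      // allocated pools, so unrelated atomics may share a pool.  A hash
      // collision only produces spurious wake-ups, never missed ones,
      // because waiters always re-check their predicate.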
      static __waiter_pool_base&
      _S_for(const void* __addr) noexcept
      {
	constexpr uintptr_t __ct = 16;
	static __waiter_pool_base __w[__ct];
	auto __key = (uintptr_t(__addr) >> 2) % __ct;
	return __w[__key];
      }
    };

    struct __waiter_pool : __waiter_pool_base
    {
      void
      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	__platform_wait(__addr, __old);
#else
	__platform_wait_t __val;
	__atomic_load(__addr, &__val, __ATOMIC_RELAXED);
	if (__val == __old)
	  {
	    lock_guard<mutex> __l(_M_mtx);
	    _M_cv.wait(_M_mtx);
	  }
#endif // _GLIBCXX_HAVE_PLATFORM_WAIT
      }
    };

    template<typename _Tp>
      struct __waiter_base
      {
	using __waiter_type = _Tp;

	__waiter_type& _M_w;
	__platform_wait_t* _M_addr;
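
	// Wait on the user's object itself when the platform primitive
	// can handle its type, otherwise fall back to the proxy counter
	// passed in as __b (the pool's _M_ver, see the constructor below).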
	template<typename _Up>
	  static __platform_wait_t*
	  _S_wait_addr(const _Up* __a, __platform_wait_t* __b)
	  {
	    if constexpr (__platform_wait_uses_type<_Up>)
	      return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));
	    else
	      return __b;
	  }

	static __waiter_type&
	_S_for(const void* __addr) noexcept
	{
	  static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));
	  auto& res = __waiter_pool_base::_S_for(__addr);
	  return reinterpret_cast<__waiter_type&>(res);
	}

	template<typename _Up>
	  explicit __waiter_base(const _Up* __addr) noexcept
	  : _M_w(_S_for(__addr))
	  , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
	  { }

	void
	_M_notify(bool __all, bool __bare = false)
	{
	  if (_M_addr == &_M_w._M_ver)
	    __atomic_fetch_add(_M_addr, 1, __ATOMIC_ACQ_REL);
	  _M_w._M_notify(_M_addr, __all, __bare);
	}

	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin_v(__platform_wait_t* __addr,
		       const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  {
	    auto const __pred = [=]
	      { return __detail::__atomic_compare(__old, __vfn()); };

	    if constexpr (__platform_wait_uses_type<_Up>)
	      {
		// Seed __val with the caller's expected value.
		__builtin_memcpy(&__val, &__old, sizeof(__val));
	      }
	    else
	      {
		__atomic_load(__addr, &__val, __ATOMIC_RELAXED);
	      }
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin_v(const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }

	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin(const __platform_wait_t* __addr,
		     _Pred __pred,
		     __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  {
	    __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin(_Pred __pred, __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  { return _S_do_spin(_M_addr, __pred, __val, __spin); }
      };
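
    // RAII wrapper around a waiter pool entry.  _EntersWait selects
    // whether this waiter registers itself in the pool's waiter count;
    // "bare" waiters are for atomic types that track contention
    // externally (see the *_bare functions below).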
    template<typename _EntersWait>
      struct __waiter : __waiter_base<__waiter_pool>
      {
	using __base_type = __waiter_base<__waiter_pool>;

	template<typename _Tp>
	  explicit __waiter(const _Tp* __addr) noexcept
	  : __base_type(__addr)
	  {
	    if constexpr (_EntersWait::value)
	      _M_w._M_enter_wait();
	  }

	~__waiter()
	{
	  if constexpr (_EntersWait::value)
	    _M_w._M_leave_wait();
	}

	template<typename _Tp, typename _ValFn>
	  void
	  _M_do_wait_v(_Tp __old, _ValFn __vfn)
	  {
	    __platform_wait_t __val;
	    if (__base_type::_M_do_spin_v(__old, __vfn, __val))
	      return;
	    __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	  }

	template<typename _Pred>
	  void
	  _M_do_wait(_Pred __pred) noexcept
	  {
	    do
	      {
		__platform_wait_t __val;
		if (__base_type::_M_do_spin(__pred, __val))
		  return;
		__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	      }
	    while (!__pred());
	  }
      };

    using __enters_wait = __waiter<std::true_type>;
    using __bare_wait = __waiter<std::false_type>;
  } // namespace __detail
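
  // Wait until the value at __addr, as re-read by __vfn, no longer
  // compares equal (byte-wise) to __old; the __pred overload below
  // instead waits until __pred() returns true.  These are the building
  // blocks for std::atomic<T>::wait.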
  template<typename _Tp, typename _ValFn>
    void
    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,
			    _ValFn __vfn) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait_v(__old, __vfn);
    }

  template<typename _Tp, typename _Pred>
    void
    __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait(__pred);
    }

  // This call is to be used by atomic types which track contention externally
  template<typename _Pred>
    void
    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,
			       _Pred __pred) noexcept
    {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      do
	{
	  __detail::__platform_wait_t __val;
	  if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
	    return;
	  __detail::__platform_wait(__addr, __val);
	}
      while (!__pred());
#else // !_GLIBCXX_HAVE_PLATFORM_WAIT
      __detail::__bare_wait __w(__addr);
      __w._M_do_wait(__pred);
#endif
    }

  template<typename _Tp>
    void
    __atomic_notify_address(const _Tp* __addr, bool __all) noexcept
    {
      __detail::__bare_wait __w(__addr);
      __w._M_notify(__all, true);
    }

  // This call is to be used by atomic types which track contention externally
  inline void
  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,
			       bool __all) noexcept
  {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
    __detail::__platform_notify(__addr, __all);
#else
    __detail::__bare_wait __w(__addr);
    __w._M_notify(__all, true);
#endif
  }
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif // GTHREADS || LINUX_FUTEX
#endif // _GLIBCXX_ATOMIC_WAIT_H
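
Everything in this header is internal, but it is what backs the C++20
wait/notify operations on std::atomic (the header is reached through
<atomic>, per the @headername note above). A minimal sketch of that public
interface, using only standard C++20 (the names ready and consumer are
illustrative, not from this header):

    #include <atomic>
    #include <thread>

    int main()
    {
      std::atomic<int> ready{0};

      std::thread consumer([&] {
	// Blocks while ready == 0; with this header that means spinning
	// briefly, then (on Linux) sleeping in the futex-based
	// __platform_wait shown above.
	ready.wait(0);
      });

      ready.store(1);      // change the value first...
      ready.notify_one();  // ...then wake the waiter (__platform_notify)
      consumer.join();
    }

ready.wait(0) returns only once the stored value no longer compares equal
to 0, so the wake-up is not lost even if notify_one() runs before the
consumer has blocked: the waiter re-checks the value before sleeping.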