// xenium — lock_free_ref_count.hpp
1 //
2 // Copyright (c) 2018-2020 Manuel Pöter.
3 // Licensed under the MIT License. See LICENSE file in the project root for full license information.
4 //
5 
6 #ifndef XENIUM_LOCK_FREE_REF_COUNT_HPP
7 #define XENIUM_LOCK_FREE_REF_COUNT_HPP
8 
9 #include <xenium/reclamation/detail/concurrent_ptr.hpp>
10 #include <xenium/reclamation/detail/guard_ptr.hpp>
11 #include <xenium/reclamation/detail/allocation_tracker.hpp>
12 
13 #include <xenium/acquire_guard.hpp>
14 #include <xenium/parameter.hpp>
15 
16 #include <memory>
17 
18 namespace xenium {
19 
namespace policy {
  /// Policy to configure whether to insert padding after the internal header
  /// for lock_free_ref_count reclamation (to avoid false sharing between the
  /// header and the object's own data).
  template <bool Value> struct insert_padding;

  /// Policy to configure the size of thread-local free-lists for
  /// lock_free_ref_count reclamation.
  template <std::size_t Value> struct thread_local_free_list_size;
}
47 
48 namespace reclamation {
49  template <
50  bool InsertPadding = false,
51  std::size_t ThreadLocalFreeListSize = 0
52  >
53  struct lock_free_ref_count_traits {
54  static constexpr bool insert_padding = InsertPadding;
55  static constexpr std::size_t thread_local_free_list_size = ThreadLocalFreeListSize;
56 
57  template <class... Policies>
58  using with = lock_free_ref_count_traits<
59  parameter::value_param_t<bool, policy::insert_padding, InsertPadding, Policies...>::value,
60  parameter::value_param_t<std::size_t, policy::thread_local_free_list_size, ThreadLocalFreeListSize, Policies...>::value
61  >;
62  };
63 
81  template <class Traits = lock_free_ref_count_traits<>>
83  {
84  template <class T, class MarkedPtr>
85  class guard_ptr;
86 
87  public:
88  template <class... Policies>
89  using with = lock_free_ref_count<typename Traits::template with<Policies...>>;
90 
91  template <class T, std::size_t N = T::number_of_mark_bits>
93 
94  template <class T, std::size_t N = 0, class DeleterT = std::default_delete<T>>
95  class enable_concurrent_ptr;
96 
97  class region_guard {};
98 
99  ALLOCATION_TRACKER
100  private:
101  static constexpr unsigned RefCountInc = 2;
102  static constexpr unsigned RefCountClaimBit = 1;
103 
104  ALLOCATION_TRACKING_FUNCTIONS;
105 #ifdef TRACK_ALLOCATIONS
106  inline static thread_local detail::registered_allocation_counter<lock_free_ref_count> allocation_counter_;
107  static detail::allocation_counter& allocation_counter();
108 #endif
109  };
110 
111  template <class Traits>
112  template <class T, std::size_t N, class DeleterT>
113  class lock_free_ref_count<Traits>::enable_concurrent_ptr:
114  private detail::tracked_object<lock_free_ref_count>
115  {
116  protected:
117  enable_concurrent_ptr() noexcept
118  {
119  destroyed().store(false, std::memory_order_relaxed);
120  }
121  enable_concurrent_ptr(const enable_concurrent_ptr&) noexcept = delete;
122  enable_concurrent_ptr(enable_concurrent_ptr&&) noexcept = delete;
123  enable_concurrent_ptr& operator=(const enable_concurrent_ptr&) noexcept = delete;
124  enable_concurrent_ptr& operator=(enable_concurrent_ptr&&) noexcept = delete;
125  virtual ~enable_concurrent_ptr() noexcept
126  {
127  assert(!is_destroyed());
128  destroyed().store(true, std::memory_order_relaxed);
129  }
130  public:
131  using Deleter = DeleterT;
132  static_assert(std::is_same<Deleter, std::default_delete<T>>::value,
133  "lock_free_ref_count reclamation can only be used with std::default_delete as Deleter.");
134 
135  static constexpr std::size_t number_of_mark_bits = N;
136  unsigned refs() const { return getHeader()->ref_count.load(std::memory_order_relaxed) >> 1; }
137 
138  void* operator new(size_t sz);
139  void operator delete(void* p);
140 
141  private:
142  bool decrement_refcnt();
143  bool is_destroyed() const { return getHeader()->destroyed.load(std::memory_order_relaxed); }
144  void push_to_free_list() { global_free_list.push(static_cast<T*>(this)); }
145 
146  struct unpadded_header {
147  std::atomic<unsigned> ref_count;
148  std::atomic<bool> destroyed;
149  concurrent_ptr<T, N> next_free;
150  };
151  struct padded_header : unpadded_header {
152  char padding[64 - sizeof(unpadded_header)];
153  };
154  using header = std::conditional_t<Traits::insert_padding, padded_header, unpadded_header>;
155  header* getHeader() { return static_cast<header*>(static_cast<void*>(this)) - 1; }
156  const header* getHeader() const { return static_cast<const header*>(static_cast<const void*>(this)) - 1; }
157 
158  std::atomic<unsigned>& ref_count() { return getHeader()->ref_count; }
159  std::atomic<bool>& destroyed() { return getHeader()->destroyed; }
160  concurrent_ptr<T, N>& next_free() { return getHeader()->next_free; }
161 
162  friend class lock_free_ref_count;
163 
164  using guard_ptr = typename concurrent_ptr<T, N>::guard_ptr;
165  using marked_ptr = typename concurrent_ptr<T, N>::marked_ptr;
166 
167  class free_list;
168  static free_list global_free_list;
169  };
170 
171  template <class Traits>
172  template <class T, class MarkedPtr>
173  class lock_free_ref_count<Traits>::guard_ptr :
174  public detail::guard_ptr<T, MarkedPtr, guard_ptr<T, MarkedPtr>>
175  {
176  using base = detail::guard_ptr<T, MarkedPtr, guard_ptr>;
177  using Deleter = typename T::Deleter;
178  public:
179  template <class, std::size_t, class>
180  friend class enable_concurrent_ptr;
181 
182  // Guard a marked ptr.
183  explicit guard_ptr(const MarkedPtr& p = MarkedPtr()) noexcept;
184  guard_ptr(const guard_ptr& p) noexcept;
185  guard_ptr(guard_ptr&& p) noexcept;
186 
187  guard_ptr& operator=(const guard_ptr& p);
188  guard_ptr& operator=(guard_ptr&& p) noexcept;
189 
190  // Atomically take snapshot of p, and *if* it points to unreclaimed object, acquire shared ownership of it.
191  void acquire(const concurrent_ptr<T>& p, std::memory_order order = std::memory_order_seq_cst) noexcept;
192 
193  // Like acquire, but quit early if a snapshot != expected.
194  bool acquire_if_equal(const concurrent_ptr<T>& p,
195  const MarkedPtr& expected,
196  std::memory_order order = std::memory_order_seq_cst) noexcept;
197 
198  // Release ownership. Postcondition: get() == nullptr.
199  void reset() noexcept;
200 
201  // Reset. Deleter d will be applied some time after all owners release their ownership.
202  void reclaim(Deleter d = Deleter()) noexcept;
203  };
204 }}
205 
206 #define LOCK_FREE_REF_COUNT_IMPL
207 #include <xenium/reclamation/impl/lock_free_ref_count.hpp>
208 #undef LOCK_FREE_REF_COUNT_IMPL
209 
210 #endif
xenium::reclamation::detail::concurrent_ptr
T must be derived from enable_concurrent_ptr<T>. D is a deleter.
Definition: concurrent_ptr.hpp:21
xenium::policy::insert_padding
Policy to configure whether to insert padding after the internal header for lock_free_ref_count reclamation.
Definition: lock_free_ref_count.hpp:32
xenium::reclamation::lock_free_ref_count
An implementation of the lock-free reference counting (LFRC) scheme as proposed by Valois.
Definition: lock_free_ref_count.hpp:83
xenium::policy::thread_local_free_list_size
Policy to configure the size of thread-local free-lists for lock_free_ref_count reclamation.
Definition: lock_free_ref_count.hpp:45