libstdc++
bits/hashtable.h
Go to the documentation of this file.
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2023 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/hashtable_policy.h>
37 #include <bits/stl_function.h> // __has_is_transparent_t
38 #if __cplusplus > 201402L
39 # include <bits/node_handle.h>
40 #endif
41 
42 namespace std _GLIBCXX_VISIBILITY(default)
43 {
44 _GLIBCXX_BEGIN_NAMESPACE_VERSION
45 /// @cond undocumented
46 
47  template<typename _Tp, typename _Hash>
48  using __cache_default
49  = __not_<__and_<// Do not cache for fast hasher.
50  __is_fast_hash<_Hash>,
51  // Mandatory to have erase not throwing.
52  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
53 
54  // Helper to conditionally delete the default constructor.
55  // The _Hash_node_base type is used to distinguish this specialization
56  // from any other potentially-overlapping subobjects of the hashtable.
57  template<typename _Equal, typename _Hash, typename _Allocator>
58  using _Hashtable_enable_default_ctor
59  = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
60  is_default_constructible<_Hash>,
61  is_default_constructible<_Allocator>>{},
62  __detail::_Hash_node_base>;
63 
64  /**
65  * Primary class template _Hashtable.
66  *
67  * @ingroup hashtable-detail
68  *
69  * @tparam _Value CopyConstructible type.
70  *
71  * @tparam _Key CopyConstructible type.
72  *
73  * @tparam _Alloc An allocator type
74  * ([lib.allocator.requirements]) whose _Alloc::value_type is
75  * _Value. As a conforming extension, we allow for
76  * _Alloc::value_type != _Value.
77  *
78  * @tparam _ExtractKey Function object that takes an object of type
79  * _Value and returns a value of type _Key.
80  *
81  * @tparam _Equal Function object that takes two objects of type k
82  * and returns a bool-like value that is true if the two objects
83  * are considered equal.
84  *
85  * @tparam _Hash The hash function. A unary function object with
86  * argument type _Key and result type size_t. Return values should
87  * be distributed over the entire range [0, numeric_limits<size_t>:::max()].
88  *
89  * @tparam _RangeHash The range-hashing function (in the terminology of
90  * Tavori and Dreizin). A binary function object whose argument
91  * types and result type are all size_t. Given arguments r and N,
92  * the return value is in the range [0, N).
93  *
94  * @tparam _Unused Not used.
95  *
96  * @tparam _RehashPolicy Policy class with three members, all of
97  * which govern the bucket count. _M_next_bkt(n) returns a bucket
98  * count no smaller than n. _M_bkt_for_elements(n) returns a
99  * bucket count appropriate for an element count of n.
100  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
101  * current bucket count is n_bkt and the current element count is
102  * n_elt, we need to increase the bucket count for n_ins insertions.
103  * If so, returns make_pair(true, n), where n is the new bucket count. If
104  * not, returns make_pair(false, <anything>)
105  *
106  * @tparam _Traits Compile-time class with three boolean
107  * std::integral_constant members: __cache_hash_code, __constant_iterators,
108  * __unique_keys.
109  *
110  * Each _Hashtable data structure has:
111  *
112  * - _Bucket[] _M_buckets
113  * - _Hash_node_base _M_before_begin
114  * - size_type _M_bucket_count
115  * - size_type _M_element_count
116  *
117  * with _Bucket being _Hash_node_base* and _Hash_node containing:
118  *
119  * - _Hash_node* _M_next
120  * - Tp _M_value
121  * - size_t _M_hash_code if cache_hash_code is true
122  *
123  * In terms of Standard containers the hashtable is like the aggregation of:
124  *
125  * - std::forward_list<_Node> containing the elements
126  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
127  *
128  * The non-empty buckets contain the node before the first node in the
129  * bucket. This design makes it possible to implement something like a
130  * std::forward_list::insert_after on container insertion and
131  * std::forward_list::erase_after on container erase
132  * calls. _M_before_begin is equivalent to
133  * std::forward_list::before_begin. Empty buckets contain
134  * nullptr. Note that one of the non-empty buckets contains
135  * &_M_before_begin which is not a dereferenceable node so the
136  * node pointer in a bucket shall never be dereferenced, only its
137  * next node can be.
138  *
139  * Walking through a bucket's nodes requires a check on the hash code to
140  * see if each node is still in the bucket. Such a design assumes a
141  * quite efficient hash functor and is one of the reasons it is
142  * highly advisable to set __cache_hash_code to true.
143  *
144  * The container iterators are simply built from nodes. This way
145  * incrementing the iterator is perfectly efficient independent of
146  * how many empty buckets there are in the container.
147  *
148  * On insert we compute the element's hash code and use it to find the
149  * bucket index. If the element must be inserted in an empty bucket
150  * we add it at the beginning of the singly linked list and make the
151  * bucket point to _M_before_begin. The bucket that used to point to
152  * _M_before_begin, if any, is updated to point to its new before
153  * begin node.
154  *
155  * On erase, the simple iterator design requires using the hash
156  * functor to get the index of the bucket to update. For this
157  * reason, when __cache_hash_code is set to false the hash functor must
158  * not throw and this is enforced by a static assertion.
159  *
160  * Functionality is implemented by decomposition into base classes,
161  * where the derived _Hashtable class is used in _Map_base,
162  * _Insert, _Rehash_base, and _Equality base classes to access the
163  * "this" pointer. _Hashtable_base is used in the base classes as a
164  * non-recursive, fully-completed-type so that detailed nested type
165  * information, such as iterator type and node type, can be
166  * used. This is similar to the "Curiously Recurring Template
167  * Pattern" (CRTP) technique, but uses a reconstructed, not
168  * explicitly passed, template pattern.
169  *
170  * Base class templates are:
171  * - __detail::_Hashtable_base
172  * - __detail::_Map_base
173  * - __detail::_Insert
174  * - __detail::_Rehash_base
175  * - __detail::_Equality
176  */
177  template<typename _Key, typename _Value, typename _Alloc,
178  typename _ExtractKey, typename _Equal,
179  typename _Hash, typename _RangeHash, typename _Unused,
180  typename _RehashPolicy, typename _Traits>
181  class _Hashtable
182  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
183  _Hash, _RangeHash, _Unused, _Traits>,
184  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
185  _Hash, _RangeHash, _Unused,
186  _RehashPolicy, _Traits>,
187  public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
188  _Hash, _RangeHash, _Unused,
189  _RehashPolicy, _Traits>,
190  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
191  _Hash, _RangeHash, _Unused,
192  _RehashPolicy, _Traits>,
193  public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
194  _Hash, _RangeHash, _Unused,
195  _RehashPolicy, _Traits>,
196  private __detail::_Hashtable_alloc<
197  __alloc_rebind<_Alloc,
198  __detail::_Hash_node<_Value,
199  _Traits::__hash_cached::value>>>,
200  private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
201  {
202  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
203  "unordered container must have a non-const, non-volatile value_type");
204 #if __cplusplus > 201703L || defined __STRICT_ANSI__
205  static_assert(is_same<typename _Alloc::value_type, _Value>{},
206  "unordered container must have the same value_type as its allocator");
207 #endif
208 
209  using __traits_type = _Traits;
210  using __hash_cached = typename __traits_type::__hash_cached;
211  using __constant_iterators = typename __traits_type::__constant_iterators;
212  using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
213  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
214 
215  using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
216 
217  using __node_value_type =
218  __detail::_Hash_node_value<_Value, __hash_cached::value>;
219  using __node_ptr = typename __hashtable_alloc::__node_ptr;
220  using __value_alloc_traits =
221  typename __hashtable_alloc::__value_alloc_traits;
222  using __node_alloc_traits =
223  typename __hashtable_alloc::__node_alloc_traits;
224  using __node_base = typename __hashtable_alloc::__node_base;
225  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
226  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
227 
228  using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
229  _Equal, _Hash,
230  _RangeHash, _Unused,
231  _RehashPolicy, _Traits>;
232  using __enable_default_ctor
233  = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
234 
235  public:
236  typedef _Key key_type;
237  typedef _Value value_type;
238  typedef _Alloc allocator_type;
239  typedef _Equal key_equal;
240 
241  // mapped_type, if present, comes from _Map_base.
242  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
243  typedef typename __value_alloc_traits::pointer pointer;
244  typedef typename __value_alloc_traits::const_pointer const_pointer;
245  typedef value_type& reference;
246  typedef const value_type& const_reference;
247 
248  using iterator = typename __insert_base::iterator;
249 
250  using const_iterator = typename __insert_base::const_iterator;
251 
252  using local_iterator = __detail::_Local_iterator<key_type, _Value,
253  _ExtractKey, _Hash, _RangeHash, _Unused,
254  __constant_iterators::value,
255  __hash_cached::value>;
256 
257  using const_local_iterator = __detail::_Local_const_iterator<
258  key_type, _Value,
259  _ExtractKey, _Hash, _RangeHash, _Unused,
260  __constant_iterators::value, __hash_cached::value>;
261 
262  private:
263  using __rehash_type = _RehashPolicy;
264  using __rehash_state = typename __rehash_type::_State;
265 
266  using __unique_keys = typename __traits_type::__unique_keys;
267 
268  using __hashtable_base = __detail::
269  _Hashtable_base<_Key, _Value, _ExtractKey,
270  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
271 
272  using __hash_code_base = typename __hashtable_base::__hash_code_base;
273  using __hash_code = typename __hashtable_base::__hash_code;
274  using __ireturn_type = typename __insert_base::__ireturn_type;
275 
276  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
277  _Equal, _Hash, _RangeHash, _Unused,
278  _RehashPolicy, _Traits>;
279 
280  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
281  _ExtractKey, _Equal,
282  _Hash, _RangeHash, _Unused,
283  _RehashPolicy, _Traits>;
284 
285  using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
286  _Equal, _Hash, _RangeHash, _Unused,
287  _RehashPolicy, _Traits>;
288 
289  using __reuse_or_alloc_node_gen_t =
290  __detail::_ReuseOrAllocNode<__node_alloc_type>;
291  using __alloc_node_gen_t =
292  __detail::_AllocNode<__node_alloc_type>;
293  using __node_builder_t =
294  __detail::_NodeBuilder<_ExtractKey>;
295 
296  // Simple RAII type for managing a node containing an element
297  struct _Scoped_node
298  {
299  // Take ownership of a node with a constructed element.
300  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
301  : _M_h(__h), _M_node(__n) { }
302 
303  // Allocate a node and construct an element within it.
304  template<typename... _Args>
305  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
306  : _M_h(__h),
307  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
308  { }
309 
310  // Destroy element and deallocate node.
311  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
312 
313  _Scoped_node(const _Scoped_node&) = delete;
314  _Scoped_node& operator=(const _Scoped_node&) = delete;
315 
316  __hashtable_alloc* _M_h;
317  __node_ptr _M_node;
318  };
319 
320  template<typename _Ht>
321  static constexpr
322  __conditional_t<std::is_lvalue_reference<_Ht>::value,
323  const value_type&, value_type&&>
324  __fwd_value_for(value_type& __val) noexcept
325  { return std::move(__val); }
326 
327  // Compile-time diagnostics.
328 
329  // _Hash_code_base has everything protected, so use this derived type to
330  // access it.
331  struct __hash_code_base_access : __hash_code_base
332  { using __hash_code_base::_M_bucket_index; };
333 
334  // To get bucket index we need _RangeHash not to throw.
335  static_assert(is_nothrow_default_constructible<_RangeHash>::value,
336  "Functor used to map hash code to bucket index"
337  " must be nothrow default constructible");
338  static_assert(noexcept(
339  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
340  "Functor used to map hash code to bucket index must be"
341  " noexcept");
342 
343  // To compute bucket index we also need _ExtratKey not to throw.
344  static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
345  "_ExtractKey must be nothrow default constructible");
346  static_assert(noexcept(
347  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
348  "_ExtractKey functor must be noexcept invocable");
349 
350  template<typename _Keya, typename _Valuea, typename _Alloca,
351  typename _ExtractKeya, typename _Equala,
352  typename _Hasha, typename _RangeHasha, typename _Unuseda,
353  typename _RehashPolicya, typename _Traitsa,
354  bool _Unique_keysa>
355  friend struct __detail::_Map_base;
356 
357  template<typename _Keya, typename _Valuea, typename _Alloca,
358  typename _ExtractKeya, typename _Equala,
359  typename _Hasha, typename _RangeHasha, typename _Unuseda,
360  typename _RehashPolicya, typename _Traitsa>
361  friend struct __detail::_Insert_base;
362 
363  template<typename _Keya, typename _Valuea, typename _Alloca,
364  typename _ExtractKeya, typename _Equala,
365  typename _Hasha, typename _RangeHasha, typename _Unuseda,
366  typename _RehashPolicya, typename _Traitsa,
367  bool _Constant_iteratorsa>
368  friend struct __detail::_Insert;
369 
370  template<typename _Keya, typename _Valuea, typename _Alloca,
371  typename _ExtractKeya, typename _Equala,
372  typename _Hasha, typename _RangeHasha, typename _Unuseda,
373  typename _RehashPolicya, typename _Traitsa,
374  bool _Unique_keysa>
375  friend struct __detail::_Equality;
376 
377  public:
378  using size_type = typename __hashtable_base::size_type;
379  using difference_type = typename __hashtable_base::difference_type;
380 
381 #if __cplusplus > 201402L
382  using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
383  using insert_return_type = _Node_insert_return<iterator, node_type>;
384 #endif
385 
386  private:
387  __buckets_ptr _M_buckets = &_M_single_bucket;
388  size_type _M_bucket_count = 1;
389  __node_base _M_before_begin;
390  size_type _M_element_count = 0;
391  _RehashPolicy _M_rehash_policy;
392 
393  // A single bucket used when only need for 1 bucket. Especially
394  // interesting in move semantic to leave hashtable with only 1 bucket
395  // which is not allocated so that we can have those operations noexcept
396  // qualified.
397  // Note that we can't leave hashtable with 0 bucket without adding
398  // numerous checks in the code to avoid 0 modulus.
399  __node_base_ptr _M_single_bucket = nullptr;
400 
401  void
402  _M_update_bbegin()
403  {
404  if (_M_begin())
405  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
406  }
407 
408  void
409  _M_update_bbegin(__node_ptr __n)
410  {
411  _M_before_begin._M_nxt = __n;
412  _M_update_bbegin();
413  }
414 
415  bool
416  _M_uses_single_bucket(__buckets_ptr __bkts) const
417  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
418 
419  bool
420  _M_uses_single_bucket() const
421  { return _M_uses_single_bucket(_M_buckets); }
422 
423  static constexpr size_t
424  __small_size_threshold() noexcept
425  {
426  return
427  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
428  }
429 
430  __hashtable_alloc&
431  _M_base_alloc() { return *this; }
432 
433  __buckets_ptr
434  _M_allocate_buckets(size_type __bkt_count)
435  {
436  if (__builtin_expect(__bkt_count == 1, false))
437  {
438  _M_single_bucket = nullptr;
439  return &_M_single_bucket;
440  }
441 
442  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
443  }
444 
445  void
446  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
447  {
448  if (_M_uses_single_bucket(__bkts))
449  return;
450 
451  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
452  }
453 
454  void
455  _M_deallocate_buckets()
456  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
457 
458  // Gets bucket begin, deals with the fact that non-empty buckets contain
459  // their before begin node.
460  __node_ptr
461  _M_bucket_begin(size_type __bkt) const;
462 
463  __node_ptr
464  _M_begin() const
465  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
466 
467  // Assign *this using another _Hashtable instance. Whether elements
468  // are copied or moved depends on the _Ht reference.
469  template<typename _Ht>
470  void
471  _M_assign_elements(_Ht&&);
472 
473  template<typename _Ht, typename _NodeGenerator>
474  void
475  _M_assign(_Ht&&, const _NodeGenerator&);
476 
477  void
478  _M_move_assign(_Hashtable&&, true_type);
479 
480  void
481  _M_move_assign(_Hashtable&&, false_type);
482 
483  void
484  _M_reset() noexcept;
485 
486  _Hashtable(const _Hash& __h, const _Equal& __eq,
487  const allocator_type& __a)
488  : __hashtable_base(__h, __eq),
489  __hashtable_alloc(__node_alloc_type(__a)),
490  __enable_default_ctor(_Enable_default_constructor_tag{})
491  { }
492 
493  template<bool _No_realloc = true>
494  static constexpr bool
495  _S_nothrow_move()
496  {
497 #if __cplusplus <= 201402L
498  return __and_<__bool_constant<_No_realloc>,
499  is_nothrow_copy_constructible<_Hash>,
500  is_nothrow_copy_constructible<_Equal>>::value;
501 #else
502  if constexpr (_No_realloc)
503  if constexpr (is_nothrow_copy_constructible<_Hash>())
504  return is_nothrow_copy_constructible<_Equal>();
505  return false;
506 #endif
507  }
508 
509  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
510  true_type /* alloc always equal */)
511  noexcept(_S_nothrow_move());
512 
513  _Hashtable(_Hashtable&&, __node_alloc_type&&,
514  false_type /* alloc always equal */);
515 
516  template<typename _InputIterator>
517  _Hashtable(_InputIterator __first, _InputIterator __last,
518  size_type __bkt_count_hint,
519  const _Hash&, const _Equal&, const allocator_type&,
520  true_type __uks);
521 
522  template<typename _InputIterator>
523  _Hashtable(_InputIterator __first, _InputIterator __last,
524  size_type __bkt_count_hint,
525  const _Hash&, const _Equal&, const allocator_type&,
526  false_type __uks);
527 
528  public:
529  // Constructor, destructor, assignment, swap
530  _Hashtable() = default;
531 
532  _Hashtable(const _Hashtable&);
533 
534  _Hashtable(const _Hashtable&, const allocator_type&);
535 
536  explicit
537  _Hashtable(size_type __bkt_count_hint,
538  const _Hash& __hf = _Hash(),
539  const key_equal& __eql = key_equal(),
540  const allocator_type& __a = allocator_type());
541 
542  // Use delegating constructors.
543  _Hashtable(_Hashtable&& __ht)
544  noexcept(_S_nothrow_move())
545  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
546  true_type{})
547  { }
548 
549  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
550  noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
551  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
552  typename __node_alloc_traits::is_always_equal{})
553  { }
554 
555  explicit
556  _Hashtable(const allocator_type& __a)
557  : __hashtable_alloc(__node_alloc_type(__a)),
558  __enable_default_ctor(_Enable_default_constructor_tag{})
559  { }
560 
561  template<typename _InputIterator>
562  _Hashtable(_InputIterator __f, _InputIterator __l,
563  size_type __bkt_count_hint = 0,
564  const _Hash& __hf = _Hash(),
565  const key_equal& __eql = key_equal(),
566  const allocator_type& __a = allocator_type())
567  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
568  __unique_keys{})
569  { }
570 
571  _Hashtable(initializer_list<value_type> __l,
572  size_type __bkt_count_hint = 0,
573  const _Hash& __hf = _Hash(),
574  const key_equal& __eql = key_equal(),
575  const allocator_type& __a = allocator_type())
576  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
577  __hf, __eql, __a, __unique_keys{})
578  { }
579 
580  _Hashtable&
581  operator=(const _Hashtable& __ht);
582 
583  _Hashtable&
584  operator=(_Hashtable&& __ht)
585  noexcept(__node_alloc_traits::_S_nothrow_move()
586  && is_nothrow_move_assignable<_Hash>::value
587  && is_nothrow_move_assignable<_Equal>::value)
588  {
589  constexpr bool __move_storage =
590  __node_alloc_traits::_S_propagate_on_move_assign()
591  || __node_alloc_traits::_S_always_equal();
592  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
593  return *this;
594  }
595 
596  _Hashtable&
597  operator=(initializer_list<value_type> __l)
598  {
599  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
600  _M_before_begin._M_nxt = nullptr;
601  clear();
602 
603  // We consider that all elements of __l are going to be inserted.
604  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
605 
606  // Do not shrink to keep potential user reservation.
607  if (_M_bucket_count < __l_bkt_count)
608  rehash(__l_bkt_count);
609 
610  this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
611  return *this;
612  }
613 
614  ~_Hashtable() noexcept;
615 
616  void
617  swap(_Hashtable&)
618  noexcept(__and_<__is_nothrow_swappable<_Hash>,
619  __is_nothrow_swappable<_Equal>>::value);
620 
621  // Basic container operations
622  iterator
623  begin() noexcept
624  { return iterator(_M_begin()); }
625 
626  const_iterator
627  begin() const noexcept
628  { return const_iterator(_M_begin()); }
629 
630  iterator
631  end() noexcept
632  { return iterator(nullptr); }
633 
634  const_iterator
635  end() const noexcept
636  { return const_iterator(nullptr); }
637 
638  const_iterator
639  cbegin() const noexcept
640  { return const_iterator(_M_begin()); }
641 
642  const_iterator
643  cend() const noexcept
644  { return const_iterator(nullptr); }
645 
646  size_type
647  size() const noexcept
648  { return _M_element_count; }
649 
650  _GLIBCXX_NODISCARD bool
651  empty() const noexcept
652  { return size() == 0; }
653 
654  allocator_type
655  get_allocator() const noexcept
656  { return allocator_type(this->_M_node_allocator()); }
657 
658  size_type
659  max_size() const noexcept
660  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
661 
662  // Observers
663  key_equal
664  key_eq() const
665  { return this->_M_eq(); }
666 
667  // hash_function, if present, comes from _Hash_code_base.
668 
669  // Bucket operations
670  size_type
671  bucket_count() const noexcept
672  { return _M_bucket_count; }
673 
674  size_type
675  max_bucket_count() const noexcept
676  { return max_size(); }
677 
678  size_type
679  bucket_size(size_type __bkt) const
680  { return std::distance(begin(__bkt), end(__bkt)); }
681 
682  size_type
683  bucket(const key_type& __k) const
684  { return _M_bucket_index(this->_M_hash_code(__k)); }
685 
686  local_iterator
687  begin(size_type __bkt)
688  {
689  return local_iterator(*this, _M_bucket_begin(__bkt),
690  __bkt, _M_bucket_count);
691  }
692 
693  local_iterator
694  end(size_type __bkt)
695  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
696 
697  const_local_iterator
698  begin(size_type __bkt) const
699  {
700  return const_local_iterator(*this, _M_bucket_begin(__bkt),
701  __bkt, _M_bucket_count);
702  }
703 
704  const_local_iterator
705  end(size_type __bkt) const
706  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
707 
708  // DR 691.
709  const_local_iterator
710  cbegin(size_type __bkt) const
711  {
712  return const_local_iterator(*this, _M_bucket_begin(__bkt),
713  __bkt, _M_bucket_count);
714  }
715 
716  const_local_iterator
717  cend(size_type __bkt) const
718  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
719 
720  float
721  load_factor() const noexcept
722  {
723  return static_cast<float>(size()) / static_cast<float>(bucket_count());
724  }
725 
726  // max_load_factor, if present, comes from _Rehash_base.
727 
728  // Generalization of max_load_factor. Extension, not found in
729  // TR1. Only useful if _RehashPolicy is something other than
730  // the default.
731  const _RehashPolicy&
732  __rehash_policy() const
733  { return _M_rehash_policy; }
734 
735  void
736  __rehash_policy(const _RehashPolicy& __pol)
737  { _M_rehash_policy = __pol; }
738 
739  // Lookup.
740  iterator
741  find(const key_type& __k);
742 
743  const_iterator
744  find(const key_type& __k) const;
745 
746  size_type
747  count(const key_type& __k) const;
748 
750  equal_range(const key_type& __k);
751 
753  equal_range(const key_type& __k) const;
754 
755 #if __cplusplus >= 202002L
756 #define __cpp_lib_generic_unordered_lookup 201811L
757 
758  template<typename _Kt,
759  typename = __has_is_transparent_t<_Hash, _Kt>,
760  typename = __has_is_transparent_t<_Equal, _Kt>>
761  iterator
762  _M_find_tr(const _Kt& __k);
763 
764  template<typename _Kt,
765  typename = __has_is_transparent_t<_Hash, _Kt>,
766  typename = __has_is_transparent_t<_Equal, _Kt>>
767  const_iterator
768  _M_find_tr(const _Kt& __k) const;
769 
770  template<typename _Kt,
771  typename = __has_is_transparent_t<_Hash, _Kt>,
772  typename = __has_is_transparent_t<_Equal, _Kt>>
773  size_type
774  _M_count_tr(const _Kt& __k) const;
775 
776  template<typename _Kt,
777  typename = __has_is_transparent_t<_Hash, _Kt>,
778  typename = __has_is_transparent_t<_Equal, _Kt>>
779  pair<iterator, iterator>
780  _M_equal_range_tr(const _Kt& __k);
781 
782  template<typename _Kt,
783  typename = __has_is_transparent_t<_Hash, _Kt>,
784  typename = __has_is_transparent_t<_Equal, _Kt>>
785  pair<const_iterator, const_iterator>
786  _M_equal_range_tr(const _Kt& __k) const;
787 #endif // C++20
788 
789  private:
790  // Bucket index computation helpers.
791  size_type
792  _M_bucket_index(const __node_value_type& __n) const noexcept
793  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
794 
795  size_type
796  _M_bucket_index(__hash_code __c) const
797  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
798 
799  __node_base_ptr
800  _M_find_before_node(const key_type&);
801 
802  // Find and insert helper functions and types
803  // Find the node before the one matching the criteria.
804  __node_base_ptr
805  _M_find_before_node(size_type, const key_type&, __hash_code) const;
806 
807  template<typename _Kt>
808  __node_base_ptr
809  _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
810 
811  __node_ptr
812  _M_find_node(size_type __bkt, const key_type& __key,
813  __hash_code __c) const
814  {
815  __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
816  if (__before_n)
817  return static_cast<__node_ptr>(__before_n->_M_nxt);
818  return nullptr;
819  }
820 
821  template<typename _Kt>
822  __node_ptr
823  _M_find_node_tr(size_type __bkt, const _Kt& __key,
824  __hash_code __c) const
825  {
826  auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
827  if (__before_n)
828  return static_cast<__node_ptr>(__before_n->_M_nxt);
829  return nullptr;
830  }
831 
832  // Insert a node at the beginning of a bucket.
833  void
834  _M_insert_bucket_begin(size_type, __node_ptr);
835 
836  // Remove the bucket first node
837  void
838  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
839  size_type __next_bkt);
840 
841  // Get the node before __n in the bucket __bkt
842  __node_base_ptr
843  _M_get_previous_node(size_type __bkt, __node_ptr __n);
844 
845  pair<const_iterator, __hash_code>
846  _M_compute_hash_code(const_iterator __hint, const key_type& __k) const;
847 
848  // Insert node __n with hash code __code, in bucket __bkt if no
849  // rehash (assumes no element with same key already present).
850  // Takes ownership of __n if insertion succeeds, throws otherwise.
851  iterator
852  _M_insert_unique_node(size_type __bkt, __hash_code,
853  __node_ptr __n, size_type __n_elt = 1);
854 
855  // Insert node __n with key __k and hash code __code.
856  // Takes ownership of __n if insertion succeeds, throws otherwise.
857  iterator
858  _M_insert_multi_node(__node_ptr __hint,
859  __hash_code __code, __node_ptr __n);
860 
861  template<typename... _Args>
863  _M_emplace(true_type __uks, _Args&&... __args);
864 
865  template<typename... _Args>
866  iterator
867  _M_emplace(false_type __uks, _Args&&... __args)
868  { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
869 
870  // Emplace with hint, useless when keys are unique.
871  template<typename... _Args>
872  iterator
873  _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
874  { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
875 
876  template<typename... _Args>
877  iterator
878  _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
879 
880  template<typename _Kt, typename _Arg, typename _NodeGenerator>
882  _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
883 
884  template<typename _Kt>
885  static __conditional_t<
886  __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
887  __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
888  key_type, _Kt&&>
889  _S_forward_key(_Kt&& __k)
890  { return std::forward<_Kt>(__k); }
891 
892  static const key_type&
893  _S_forward_key(const key_type& __k)
894  { return __k; }
895 
896  static key_type&&
897  _S_forward_key(key_type&& __k)
898  { return std::move(__k); }
899 
900  template<typename _Arg, typename _NodeGenerator>
902  _M_insert_unique_aux(_Arg&& __arg, const _NodeGenerator& __node_gen)
903  {
904  return _M_insert_unique(
905  _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
906  std::forward<_Arg>(__arg), __node_gen);
907  }
908 
909  template<typename _Arg, typename _NodeGenerator>
911  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
912  true_type /* __uks */)
913  {
914  using __to_value
915  = __detail::_ConvertToValueType<_ExtractKey, value_type>;
916  return _M_insert_unique_aux(
917  __to_value{}(std::forward<_Arg>(__arg)), __node_gen);
918  }
919 
920  template<typename _Arg, typename _NodeGenerator>
921  iterator
922  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
923  false_type __uks)
924  {
925  using __to_value
926  = __detail::_ConvertToValueType<_ExtractKey, value_type>;
927  return _M_insert(cend(),
928  __to_value{}(std::forward<_Arg>(__arg)), __node_gen, __uks);
929  }
930 
931  // Insert with hint, not used when keys are unique.
932  template<typename _Arg, typename _NodeGenerator>
933  iterator
934  _M_insert(const_iterator, _Arg&& __arg,
935  const _NodeGenerator& __node_gen, true_type __uks)
936  {
937  return
938  _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
939  }
940 
941  // Insert with hint when keys are not unique.
942  template<typename _Arg, typename _NodeGenerator>
943  iterator
944  _M_insert(const_iterator, _Arg&&,
945  const _NodeGenerator&, false_type __uks);
946 
947  size_type
948  _M_erase(true_type __uks, const key_type&);
949 
950  size_type
951  _M_erase(false_type __uks, const key_type&);
952 
953  iterator
954  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
955 
956  public:
957  // Emplace
958  template<typename... _Args>
959  __ireturn_type
960  emplace(_Args&&... __args)
961  { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
962 
963  template<typename... _Args>
964  iterator
965  emplace_hint(const_iterator __hint, _Args&&... __args)
966  {
967  return _M_emplace(__hint, __unique_keys{},
968  std::forward<_Args>(__args)...);
969  }
970 
971  // Insert member functions via inheritance.
972 
973  // Erase
974  iterator
975  erase(const_iterator);
976 
977  // LWG 2059.
978  iterator
979  erase(iterator __it)
980  { return erase(const_iterator(__it)); }
981 
982  size_type
983  erase(const key_type& __k)
984  { return _M_erase(__unique_keys{}, __k); }
985 
986  iterator
987  erase(const_iterator, const_iterator);
988 
989  void
990  clear() noexcept;
991 
992  // Set number of buckets keeping it appropriate for container's number
993  // of elements.
994  void rehash(size_type __bkt_count);
995 
996  // DR 1189.
997  // reserve, if present, comes from _Rehash_base.
998 
999 #if __cplusplus > 201404L
1000  /// Re-insert an extracted node into a container with unique keys.
1001  insert_return_type
1002  _M_reinsert_node(node_type&& __nh)
1003  {
1004  insert_return_type __ret;
1005  if (__nh.empty())
1006  __ret.position = end();
1007  else
1008  {
1009  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1010 
1011  const key_type& __k = __nh._M_key();
1012  __hash_code __code = this->_M_hash_code(__k);
1013  size_type __bkt = _M_bucket_index(__code);
1014  if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
1015  {
1016  __ret.node = std::move(__nh);
1017  __ret.position = iterator(__n);
1018  __ret.inserted = false;
1019  }
1020  else
1021  {
1022  __ret.position
1023  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
1024  __nh.release();
1025  __ret.inserted = true;
1026  }
1027  }
1028  return __ret;
1029  }
1030 
1031  /// Re-insert an extracted node into a container with equivalent keys.
1032  iterator
1033  _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1034  {
1035  if (__nh.empty())
1036  return end();
1037 
1038  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1039 
1040  const key_type& __k = __nh._M_key();
1041  auto __code = this->_M_hash_code(__k);
1042  auto __ret
1043  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1044  __nh.release();
1045  return __ret;
1046  }
1047 
1048  private:
1049  node_type
1050  _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1051  {
1052  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1053  if (__prev_n == _M_buckets[__bkt])
1054  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1055  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1056  else if (__n->_M_nxt)
1057  {
1058  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1059  if (__next_bkt != __bkt)
1060  _M_buckets[__next_bkt] = __prev_n;
1061  }
1062 
1063  __prev_n->_M_nxt = __n->_M_nxt;
1064  __n->_M_nxt = nullptr;
1065  --_M_element_count;
1066  return { __n, this->_M_node_allocator() };
1067  }
1068 
1069  // Only use the possibly cached node's hash code if its hash function
1070  // _H2 matches _Hash and is stateless. Otherwise recompute it using _Hash.
1071  template<typename _H2>
1072  __hash_code
1073  _M_src_hash_code(const _H2&, const key_type& __k,
1074  const __node_value_type& __src_n) const
1075  {
1076  if constexpr (std::is_same_v<_H2, _Hash>)
1077  if constexpr (std::is_empty_v<_Hash>)
1078  return this->_M_hash_code(__src_n);
1079 
1080  return this->_M_hash_code(__k);
1081  }
1082 
1083  public:
1084  // Extract a node.
1085  node_type
1086  extract(const_iterator __pos)
1087  {
1088  size_t __bkt = _M_bucket_index(*__pos._M_cur);
1089  return _M_extract_node(__bkt,
1090  _M_get_previous_node(__bkt, __pos._M_cur));
1091  }
1092 
1093  /// Extract a node.
1094  node_type
1095  extract(const _Key& __k)
1096  {
1097  node_type __nh;
1098  __hash_code __code = this->_M_hash_code(__k);
1099  std::size_t __bkt = _M_bucket_index(__code);
1100  if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1101  __nh = _M_extract_node(__bkt, __prev_node);
1102  return __nh;
1103  }
1104 
1105  /// Merge from a compatible container into one with unique keys.
1106  template<typename _Compatible_Hashtable>
1107  void
1108  _M_merge_unique(_Compatible_Hashtable& __src)
1109  {
1110  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1111  node_type>, "Node types are compatible");
1112  __glibcxx_assert(get_allocator() == __src.get_allocator());
1113 
1114  auto __n_elt = __src.size();
1115  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1116  {
1117  auto __pos = __i++;
1118  const key_type& __k = _ExtractKey{}(*__pos);
1119  __hash_code __code
1120  = _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
1121  size_type __bkt = _M_bucket_index(__code);
1122  if (_M_find_node(__bkt, __k, __code) == nullptr)
1123  {
1124  auto __nh = __src.extract(__pos);
1125  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1126  __nh.release();
1127  __n_elt = 1;
1128  }
1129  else if (__n_elt != 1)
1130  --__n_elt;
1131  }
1132  }
1133 
1134  /// Merge from a compatible container into one with equivalent keys.
1135  template<typename _Compatible_Hashtable>
1136  void
1137  _M_merge_multi(_Compatible_Hashtable& __src)
1138  {
1139  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1140  node_type>, "Node types are compatible");
1141  __glibcxx_assert(get_allocator() == __src.get_allocator());
1142 
1143  __node_ptr __hint = nullptr;
1144  this->reserve(size() + __src.size());
1145  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1146  {
1147  auto __pos = __i++;
1148  const key_type& __k = _ExtractKey{}(*__pos);
1149  __hash_code __code
1150  = _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
1151  auto __nh = __src.extract(__pos);
1152  __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1153  __nh.release();
1154  }
1155  }
1156 #endif // C++17
1157 
1158  private:
1159  // Helper rehash method used when keys are unique.
1160  void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1161 
1162  // Helper rehash method used when keys can be non-unique.
1163  void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1164 
1165  // Unconditionally change size of bucket array to n, restore
1166  // hash policy state to __state on exception.
1167  void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1168  };
1169 
1170  // Definitions of class template _Hashtable's out-of-line member functions.
1171  template<typename _Key, typename _Value, typename _Alloc,
1172  typename _ExtractKey, typename _Equal,
1173  typename _Hash, typename _RangeHash, typename _Unused,
1174  typename _RehashPolicy, typename _Traits>
1175  auto
1176  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1177  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1178  _M_bucket_begin(size_type __bkt) const
1179  -> __node_ptr
1180  {
1181  __node_base_ptr __n = _M_buckets[__bkt];
1182  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1183  }
1184 
1185  template<typename _Key, typename _Value, typename _Alloc,
1186  typename _ExtractKey, typename _Equal,
1187  typename _Hash, typename _RangeHash, typename _Unused,
1188  typename _RehashPolicy, typename _Traits>
1189  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1190  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1191  _Hashtable(size_type __bkt_count_hint,
1192  const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1193  : _Hashtable(__h, __eq, __a)
1194  {
1195  auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1196  if (__bkt_count > _M_bucket_count)
1197  {
1198  _M_buckets = _M_allocate_buckets(__bkt_count);
1199  _M_bucket_count = __bkt_count;
1200  }
1201  }
1202 
1203  template<typename _Key, typename _Value, typename _Alloc,
1204  typename _ExtractKey, typename _Equal,
1205  typename _Hash, typename _RangeHash, typename _Unused,
1206  typename _RehashPolicy, typename _Traits>
1207  template<typename _InputIterator>
1208  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1209  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1210  _Hashtable(_InputIterator __f, _InputIterator __l,
1211  size_type __bkt_count_hint,
1212  const _Hash& __h, const _Equal& __eq,
1213  const allocator_type& __a, true_type /* __uks */)
1214  : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1215  { this->insert(__f, __l); }
1216 
1217  template<typename _Key, typename _Value, typename _Alloc,
1218  typename _ExtractKey, typename _Equal,
1219  typename _Hash, typename _RangeHash, typename _Unused,
1220  typename _RehashPolicy, typename _Traits>
1221  template<typename _InputIterator>
1222  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1223  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1224  _Hashtable(_InputIterator __f, _InputIterator __l,
1225  size_type __bkt_count_hint,
1226  const _Hash& __h, const _Equal& __eq,
1227  const allocator_type& __a, false_type __uks)
1228  : _Hashtable(__h, __eq, __a)
1229  {
1230  auto __nb_elems = __detail::__distance_fw(__f, __l);
1231  auto __bkt_count =
1232  _M_rehash_policy._M_next_bkt(
1233  std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1234  __bkt_count_hint));
1235 
1236  if (__bkt_count > _M_bucket_count)
1237  {
1238  _M_buckets = _M_allocate_buckets(__bkt_count);
1239  _M_bucket_count = __bkt_count;
1240  }
1241 
1242  __alloc_node_gen_t __node_gen(*this);
1243  for (; __f != __l; ++__f)
1244  _M_insert(*__f, __node_gen, __uks);
1245  }
1246 
1247  template<typename _Key, typename _Value, typename _Alloc,
1248  typename _ExtractKey, typename _Equal,
1249  typename _Hash, typename _RangeHash, typename _Unused,
1250  typename _RehashPolicy, typename _Traits>
1251  auto
1252  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1253  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1254  operator=(const _Hashtable& __ht)
1255  -> _Hashtable&
1256  {
1257  if (&__ht == this)
1258  return *this;
1259 
1260  if (__node_alloc_traits::_S_propagate_on_copy_assign())
1261  {
1262  auto& __this_alloc = this->_M_node_allocator();
1263  auto& __that_alloc = __ht._M_node_allocator();
1264  if (!__node_alloc_traits::_S_always_equal()
1265  && __this_alloc != __that_alloc)
1266  {
1267  // Replacement allocator cannot free existing storage.
1268  this->_M_deallocate_nodes(_M_begin());
1269  _M_before_begin._M_nxt = nullptr;
1270  _M_deallocate_buckets();
1271  _M_buckets = nullptr;
1272  std::__alloc_on_copy(__this_alloc, __that_alloc);
1273  __hashtable_base::operator=(__ht);
1274  _M_bucket_count = __ht._M_bucket_count;
1275  _M_element_count = __ht._M_element_count;
1276  _M_rehash_policy = __ht._M_rehash_policy;
1277  __alloc_node_gen_t __alloc_node_gen(*this);
1278  __try
1279  {
1280  _M_assign(__ht, __alloc_node_gen);
1281  }
1282  __catch(...)
1283  {
1284  // _M_assign took care of deallocating all memory. Now we
1285  // must make sure this instance remains in a usable state.
1286  _M_reset();
1287  __throw_exception_again;
1288  }
1289  return *this;
1290  }
1291  std::__alloc_on_copy(__this_alloc, __that_alloc);
1292  }
1293 
1294  // Reuse allocated buckets and nodes.
1295  _M_assign_elements(__ht);
1296  return *this;
1297  }
1298 
1299  template<typename _Key, typename _Value, typename _Alloc,
1300  typename _ExtractKey, typename _Equal,
1301  typename _Hash, typename _RangeHash, typename _Unused,
1302  typename _RehashPolicy, typename _Traits>
1303  template<typename _Ht>
1304  void
1305  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1306  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1307  _M_assign_elements(_Ht&& __ht)
1308  {
1309  __buckets_ptr __former_buckets = nullptr;
1310  std::size_t __former_bucket_count = _M_bucket_count;
1311  const __rehash_state& __former_state = _M_rehash_policy._M_state();
1312 
1313  if (_M_bucket_count != __ht._M_bucket_count)
1314  {
1315  __former_buckets = _M_buckets;
1316  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1317  _M_bucket_count = __ht._M_bucket_count;
1318  }
1319  else
1320  __builtin_memset(_M_buckets, 0,
1321  _M_bucket_count * sizeof(__node_base_ptr));
1322 
1323  __try
1324  {
1325  __hashtable_base::operator=(std::forward<_Ht>(__ht));
1326  _M_element_count = __ht._M_element_count;
1327  _M_rehash_policy = __ht._M_rehash_policy;
1328  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1329  _M_before_begin._M_nxt = nullptr;
1330  _M_assign(std::forward<_Ht>(__ht), __roan);
1331  if (__former_buckets)
1332  _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1333  }
1334  __catch(...)
1335  {
1336  if (__former_buckets)
1337  {
1338  // Restore previous buckets.
1339  _M_deallocate_buckets();
1340  _M_rehash_policy._M_reset(__former_state);
1341  _M_buckets = __former_buckets;
1342  _M_bucket_count = __former_bucket_count;
1343  }
1344  __builtin_memset(_M_buckets, 0,
1345  _M_bucket_count * sizeof(__node_base_ptr));
1346  __throw_exception_again;
1347  }
1348  }
1349 
1350  template<typename _Key, typename _Value, typename _Alloc,
1351  typename _ExtractKey, typename _Equal,
1352  typename _Hash, typename _RangeHash, typename _Unused,
1353  typename _RehashPolicy, typename _Traits>
1354  template<typename _Ht, typename _NodeGenerator>
1355  void
1356  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1357  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1358  _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1359  {
1360  __buckets_ptr __buckets = nullptr;
1361  if (!_M_buckets)
1362  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1363 
1364  __try
1365  {
1366  if (!__ht._M_before_begin._M_nxt)
1367  return;
1368 
1369  // First deal with the special first node pointed to by
1370  // _M_before_begin.
1371  __node_ptr __ht_n = __ht._M_begin();
1372  __node_ptr __this_n
1373  = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1374  this->_M_copy_code(*__this_n, *__ht_n);
1375  _M_update_bbegin(__this_n);
1376 
1377  // Then deal with other nodes.
1378  __node_ptr __prev_n = __this_n;
1379  for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1380  {
1381  __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1382  __prev_n->_M_nxt = __this_n;
1383  this->_M_copy_code(*__this_n, *__ht_n);
1384  size_type __bkt = _M_bucket_index(*__this_n);
1385  if (!_M_buckets[__bkt])
1386  _M_buckets[__bkt] = __prev_n;
1387  __prev_n = __this_n;
1388  }
1389  }
1390  __catch(...)
1391  {
1392  clear();
1393  if (__buckets)
1394  _M_deallocate_buckets();
1395  __throw_exception_again;
1396  }
1397  }
1398 
1399  template<typename _Key, typename _Value, typename _Alloc,
1400  typename _ExtractKey, typename _Equal,
1401  typename _Hash, typename _RangeHash, typename _Unused,
1402  typename _RehashPolicy, typename _Traits>
1403  void
1404  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1405  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1406  _M_reset() noexcept
1407  {
1408  _M_rehash_policy._M_reset();
1409  _M_bucket_count = 1;
1410  _M_single_bucket = nullptr;
1411  _M_buckets = &_M_single_bucket;
1412  _M_before_begin._M_nxt = nullptr;
1413  _M_element_count = 0;
1414  }
1415 
1416  template<typename _Key, typename _Value, typename _Alloc,
1417  typename _ExtractKey, typename _Equal,
1418  typename _Hash, typename _RangeHash, typename _Unused,
1419  typename _RehashPolicy, typename _Traits>
1420  void
1421  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1422  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1423  _M_move_assign(_Hashtable&& __ht, true_type)
1424  {
1425  if (__builtin_expect(std::__addressof(__ht) == this, false))
1426  return;
1427 
1428  this->_M_deallocate_nodes(_M_begin());
1429  _M_deallocate_buckets();
1430  __hashtable_base::operator=(std::move(__ht));
1431  _M_rehash_policy = __ht._M_rehash_policy;
1432  if (!__ht._M_uses_single_bucket())
1433  _M_buckets = __ht._M_buckets;
1434  else
1435  {
1436  _M_buckets = &_M_single_bucket;
1437  _M_single_bucket = __ht._M_single_bucket;
1438  }
1439 
1440  _M_bucket_count = __ht._M_bucket_count;
1441  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1442  _M_element_count = __ht._M_element_count;
1443  std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1444 
1445  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1446  _M_update_bbegin();
1447  __ht._M_reset();
1448  }
1449 
1450  template<typename _Key, typename _Value, typename _Alloc,
1451  typename _ExtractKey, typename _Equal,
1452  typename _Hash, typename _RangeHash, typename _Unused,
1453  typename _RehashPolicy, typename _Traits>
1454  void
1455  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1456  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1457  _M_move_assign(_Hashtable&& __ht, false_type)
1458  {
1459  if (__ht._M_node_allocator() == this->_M_node_allocator())
1460  _M_move_assign(std::move(__ht), true_type{});
1461  else
1462  {
1463  // Can't move memory, move elements then.
1464  _M_assign_elements(std::move(__ht));
1465  __ht.clear();
1466  }
1467  }
1468 
1469  template<typename _Key, typename _Value, typename _Alloc,
1470  typename _ExtractKey, typename _Equal,
1471  typename _Hash, typename _RangeHash, typename _Unused,
1472  typename _RehashPolicy, typename _Traits>
1473  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1474  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1475  _Hashtable(const _Hashtable& __ht)
1476  : __hashtable_base(__ht),
1477  __map_base(__ht),
1478  __rehash_base(__ht),
1479  __hashtable_alloc(
1480  __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1481  __enable_default_ctor(__ht),
1482  _M_buckets(nullptr),
1483  _M_bucket_count(__ht._M_bucket_count),
1484  _M_element_count(__ht._M_element_count),
1485  _M_rehash_policy(__ht._M_rehash_policy)
1486  {
1487  __alloc_node_gen_t __alloc_node_gen(*this);
1488  _M_assign(__ht, __alloc_node_gen);
1489  }
1490 
1491  template<typename _Key, typename _Value, typename _Alloc,
1492  typename _ExtractKey, typename _Equal,
1493  typename _Hash, typename _RangeHash, typename _Unused,
1494  typename _RehashPolicy, typename _Traits>
1495  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1496  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1497  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1498  true_type /* alloc always equal */)
1499  noexcept(_S_nothrow_move())
1500  : __hashtable_base(__ht),
1501  __map_base(__ht),
1502  __rehash_base(__ht),
1503  __hashtable_alloc(std::move(__a)),
1504  __enable_default_ctor(__ht),
1505  _M_buckets(__ht._M_buckets),
1506  _M_bucket_count(__ht._M_bucket_count),
1507  _M_before_begin(__ht._M_before_begin._M_nxt),
1508  _M_element_count(__ht._M_element_count),
1509  _M_rehash_policy(__ht._M_rehash_policy)
1510  {
1511  // Update buckets if __ht is using its single bucket.
1512  if (__ht._M_uses_single_bucket())
1513  {
1514  _M_buckets = &_M_single_bucket;
1515  _M_single_bucket = __ht._M_single_bucket;
1516  }
1517 
1518  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1519  _M_update_bbegin();
1520 
1521  __ht._M_reset();
1522  }
1523 
1524  template<typename _Key, typename _Value, typename _Alloc,
1525  typename _ExtractKey, typename _Equal,
1526  typename _Hash, typename _RangeHash, typename _Unused,
1527  typename _RehashPolicy, typename _Traits>
1528  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1529  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1530  _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1531  : __hashtable_base(__ht),
1532  __map_base(__ht),
1533  __rehash_base(__ht),
1534  __hashtable_alloc(__node_alloc_type(__a)),
1535  __enable_default_ctor(__ht),
1536  _M_buckets(),
1537  _M_bucket_count(__ht._M_bucket_count),
1538  _M_element_count(__ht._M_element_count),
1539  _M_rehash_policy(__ht._M_rehash_policy)
1540  {
1541  __alloc_node_gen_t __alloc_node_gen(*this);
1542  _M_assign(__ht, __alloc_node_gen);
1543  }
1544 
1545  template<typename _Key, typename _Value, typename _Alloc,
1546  typename _ExtractKey, typename _Equal,
1547  typename _Hash, typename _RangeHash, typename _Unused,
1548  typename _RehashPolicy, typename _Traits>
1549  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1550  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1551  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1552  false_type /* alloc always equal */)
1553  : __hashtable_base(__ht),
1554  __map_base(__ht),
1555  __rehash_base(__ht),
1556  __hashtable_alloc(std::move(__a)),
1557  __enable_default_ctor(__ht),
1558  _M_buckets(nullptr),
1559  _M_bucket_count(__ht._M_bucket_count),
1560  _M_element_count(__ht._M_element_count),
1561  _M_rehash_policy(__ht._M_rehash_policy)
1562  {
1563  if (__ht._M_node_allocator() == this->_M_node_allocator())
1564  {
1565  if (__ht._M_uses_single_bucket())
1566  {
1567  _M_buckets = &_M_single_bucket;
1568  _M_single_bucket = __ht._M_single_bucket;
1569  }
1570  else
1571  _M_buckets = __ht._M_buckets;
1572 
1573  // Fix bucket containing the _M_before_begin pointer that can't be
1574  // moved.
1575  _M_update_bbegin(__ht._M_begin());
1576 
1577  __ht._M_reset();
1578  }
1579  else
1580  {
1581  __alloc_node_gen_t __alloc_gen(*this);
1582 
1583  using _Fwd_Ht = __conditional_t<
1584  __move_if_noexcept_cond<value_type>::value,
1585  const _Hashtable&, _Hashtable&&>;
1586  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1587  __ht.clear();
1588  }
1589  }
1590 
1591  template<typename _Key, typename _Value, typename _Alloc,
1592  typename _ExtractKey, typename _Equal,
1593  typename _Hash, typename _RangeHash, typename _Unused,
1594  typename _RehashPolicy, typename _Traits>
1595  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1596  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1597  ~_Hashtable() noexcept
1598  {
1599  // Getting a bucket index from a node shall not throw because it is used
1600  // in methods (erase, swap...) that shall not throw. Need a complete
1601  // type to check this, so do it in the destructor not at class scope.
1602  static_assert(noexcept(declval<const __hash_code_base_access&>()
1603  ._M_bucket_index(declval<const __node_value_type&>(),
1604  (std::size_t)0)),
1605  "Cache the hash code or qualify your functors involved"
1606  " in hash code and bucket index computation with noexcept");
1607 
1608  clear();
1609  _M_deallocate_buckets();
1610  }
1611 
1612  template<typename _Key, typename _Value, typename _Alloc,
1613  typename _ExtractKey, typename _Equal,
1614  typename _Hash, typename _RangeHash, typename _Unused,
1615  typename _RehashPolicy, typename _Traits>
1616  void
1617  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1618  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1619  swap(_Hashtable& __x)
1620  noexcept(__and_<__is_nothrow_swappable<_Hash>,
1621  __is_nothrow_swappable<_Equal>>::value)
1622  {
1623  // The only base class with member variables is hash_code_base.
1624  // We define _Hash_code_base::_M_swap because different
1625  // specializations have different members.
1626  this->_M_swap(__x);
1627 
1628  std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1629  std::swap(_M_rehash_policy, __x._M_rehash_policy);
1630 
1631  // Deal properly with potentially moved instances.
1632  if (this->_M_uses_single_bucket())
1633  {
1634  if (!__x._M_uses_single_bucket())
1635  {
1636  _M_buckets = __x._M_buckets;
1637  __x._M_buckets = &__x._M_single_bucket;
1638  }
1639  }
1640  else if (__x._M_uses_single_bucket())
1641  {
1642  __x._M_buckets = _M_buckets;
1643  _M_buckets = &_M_single_bucket;
1644  }
1645  else
1646  std::swap(_M_buckets, __x._M_buckets);
1647 
1648  std::swap(_M_bucket_count, __x._M_bucket_count);
1649  std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1650  std::swap(_M_element_count, __x._M_element_count);
1651  std::swap(_M_single_bucket, __x._M_single_bucket);
1652 
1653  // Fix buckets containing the _M_before_begin pointers that can't be
1654  // swapped.
1655  _M_update_bbegin();
1656  __x._M_update_bbegin();
1657  }
1658 
1659  template<typename _Key, typename _Value, typename _Alloc,
1660  typename _ExtractKey, typename _Equal,
1661  typename _Hash, typename _RangeHash, typename _Unused,
1662  typename _RehashPolicy, typename _Traits>
1663  auto
1664  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1665  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1666  find(const key_type& __k)
1667  -> iterator
1668  {
1669  if (size() <= __small_size_threshold())
1670  {
1671  for (auto __it = begin(); __it != end(); ++__it)
1672  if (this->_M_key_equals(__k, *__it._M_cur))
1673  return __it;
1674  return end();
1675  }
1676 
1677  __hash_code __code = this->_M_hash_code(__k);
1678  std::size_t __bkt = _M_bucket_index(__code);
1679  return iterator(_M_find_node(__bkt, __k, __code));
1680  }
1681 
1682  template<typename _Key, typename _Value, typename _Alloc,
1683  typename _ExtractKey, typename _Equal,
1684  typename _Hash, typename _RangeHash, typename _Unused,
1685  typename _RehashPolicy, typename _Traits>
1686  auto
1687  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1688  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1689  find(const key_type& __k) const
1690  -> const_iterator
1691  {
1692  if (size() <= __small_size_threshold())
1693  {
1694  for (auto __it = begin(); __it != end(); ++__it)
1695  if (this->_M_key_equals(__k, *__it._M_cur))
1696  return __it;
1697  return end();
1698  }
1699 
1700  __hash_code __code = this->_M_hash_code(__k);
1701  std::size_t __bkt = _M_bucket_index(__code);
1702  return const_iterator(_M_find_node(__bkt, __k, __code));
1703  }
1704 
1705 #if __cplusplus > 201703L
1706  template<typename _Key, typename _Value, typename _Alloc,
1707  typename _ExtractKey, typename _Equal,
1708  typename _Hash, typename _RangeHash, typename _Unused,
1709  typename _RehashPolicy, typename _Traits>
1710  template<typename _Kt, typename, typename>
1711  auto
1712  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1713  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1714  _M_find_tr(const _Kt& __k)
1715  -> iterator
1716  {
1717  __hash_code __code = this->_M_hash_code_tr(__k);
1718  std::size_t __bkt = _M_bucket_index(__code);
1719  return iterator(_M_find_node_tr(__bkt, __k, __code));
1720  }
1721 
1722  template<typename _Key, typename _Value, typename _Alloc,
1723  typename _ExtractKey, typename _Equal,
1724  typename _Hash, typename _RangeHash, typename _Unused,
1725  typename _RehashPolicy, typename _Traits>
1726  template<typename _Kt, typename, typename>
1727  auto
1728  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1729  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1730  _M_find_tr(const _Kt& __k) const
1731  -> const_iterator
1732  {
1733  __hash_code __code = this->_M_hash_code_tr(__k);
1734  std::size_t __bkt = _M_bucket_index(__code);
1735  return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1736  }
1737 #endif
1738 
1739  template<typename _Key, typename _Value, typename _Alloc,
1740  typename _ExtractKey, typename _Equal,
1741  typename _Hash, typename _RangeHash, typename _Unused,
1742  typename _RehashPolicy, typename _Traits>
1743  auto
1744  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1745  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1746  count(const key_type& __k) const
1747  -> size_type
1748  {
1749  auto __it = find(__k);
1750  if (!__it._M_cur)
1751  return 0;
1752 
1753  if (__unique_keys::value)
1754  return 1;
1755 
1756  // All equivalent values are next to each other, if we find a
1757  // non-equivalent value after an equivalent one it means that we won't
1758  // find any new equivalent value.
1759  size_type __result = 1;
1760  for (auto __ref = __it++;
1761  __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1762  ++__it)
1763  ++__result;
1764 
1765  return __result;
1766  }
1767 
1768 #if __cplusplus > 201703L
1769  template<typename _Key, typename _Value, typename _Alloc,
1770  typename _ExtractKey, typename _Equal,
1771  typename _Hash, typename _RangeHash, typename _Unused,
1772  typename _RehashPolicy, typename _Traits>
1773  template<typename _Kt, typename, typename>
1774  auto
1775  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1776  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1777  _M_count_tr(const _Kt& __k) const
1778  -> size_type
1779  {
1780  __hash_code __code = this->_M_hash_code_tr(__k);
1781  std::size_t __bkt = _M_bucket_index(__code);
1782  auto __n = _M_find_node_tr(__bkt, __k, __code);
1783  if (!__n)
1784  return 0;
1785 
1786  // All equivalent values are next to each other, if we find a
1787  // non-equivalent value after an equivalent one it means that we won't
1788  // find any new equivalent value.
1789  iterator __it(__n);
1790  size_type __result = 1;
1791  for (++__it;
1792  __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
1793  ++__it)
1794  ++__result;
1795 
1796  return __result;
1797  }
1798 #endif
1799 
1800  template<typename _Key, typename _Value, typename _Alloc,
1801  typename _ExtractKey, typename _Equal,
1802  typename _Hash, typename _RangeHash, typename _Unused,
1803  typename _RehashPolicy, typename _Traits>
1804  auto
1805  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1806  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1807  equal_range(const key_type& __k)
1808  -> pair<iterator, iterator>
1809  {
1810  auto __ite = find(__k);
1811  if (!__ite._M_cur)
1812  return { __ite, __ite };
1813 
1814  auto __beg = __ite++;
1815  if (__unique_keys::value)
1816  return { __beg, __ite };
1817 
1818  // All equivalent values are next to each other, if we find a
1819  // non-equivalent value after an equivalent one it means that we won't
1820  // find any new equivalent value.
1821  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1822  ++__ite;
1823 
1824  return { __beg, __ite };
1825  }
1826 
1827  template<typename _Key, typename _Value, typename _Alloc,
1828  typename _ExtractKey, typename _Equal,
1829  typename _Hash, typename _RangeHash, typename _Unused,
1830  typename _RehashPolicy, typename _Traits>
1831  auto
1832  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1833  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1834  equal_range(const key_type& __k) const
1835  -> pair<const_iterator, const_iterator>
1836  {
1837  auto __ite = find(__k);
1838  if (!__ite._M_cur)
1839  return { __ite, __ite };
1840 
1841  auto __beg = __ite++;
1842  if (__unique_keys::value)
1843  return { __beg, __ite };
1844 
1845  // All equivalent values are next to each other, if we find a
1846  // non-equivalent value after an equivalent one it means that we won't
1847  // find any new equivalent value.
1848  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1849  ++__ite;
1850 
1851  return { __beg, __ite };
1852  }
1853 
1854 #if __cplusplus > 201703L
1855  template<typename _Key, typename _Value, typename _Alloc,
1856  typename _ExtractKey, typename _Equal,
1857  typename _Hash, typename _RangeHash, typename _Unused,
1858  typename _RehashPolicy, typename _Traits>
1859  template<typename _Kt, typename, typename>
1860  auto
1861  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1862  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1863  _M_equal_range_tr(const _Kt& __k)
1864  -> pair<iterator, iterator>
1865  {
1866  __hash_code __code = this->_M_hash_code_tr(__k);
1867  std::size_t __bkt = _M_bucket_index(__code);
1868  auto __n = _M_find_node_tr(__bkt, __k, __code);
1869  iterator __ite(__n);
1870  if (!__n)
1871  return { __ite, __ite };
1872 
1873  // All equivalent values are next to each other, if we find a
1874  // non-equivalent value after an equivalent one it means that we won't
1875  // find any new equivalent value.
1876  auto __beg = __ite++;
1877  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1878  ++__ite;
1879 
1880  return { __beg, __ite };
1881  }
1882 
1883  template<typename _Key, typename _Value, typename _Alloc,
1884  typename _ExtractKey, typename _Equal,
1885  typename _Hash, typename _RangeHash, typename _Unused,
1886  typename _RehashPolicy, typename _Traits>
1887  template<typename _Kt, typename, typename>
1888  auto
1889  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1890  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1891  _M_equal_range_tr(const _Kt& __k) const
1892  -> pair<const_iterator, const_iterator>
1893  {
1894  __hash_code __code = this->_M_hash_code_tr(__k);
1895  std::size_t __bkt = _M_bucket_index(__code);
1896  auto __n = _M_find_node_tr(__bkt, __k, __code);
1897  const_iterator __ite(__n);
1898  if (!__n)
1899  return { __ite, __ite };
1900 
1901  // All equivalent values are next to each other, if we find a
1902  // non-equivalent value after an equivalent one it means that we won't
1903  // find any new equivalent value.
1904  auto __beg = __ite++;
1905  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1906  ++__ite;
1907 
1908  return { __beg, __ite };
1909  }
1910 #endif
1911 
1912  // Find the node before the one whose key compares equal to k.
1913  // Return nullptr if no node is found.
1914  template<typename _Key, typename _Value, typename _Alloc,
1915  typename _ExtractKey, typename _Equal,
1916  typename _Hash, typename _RangeHash, typename _Unused,
1917  typename _RehashPolicy, typename _Traits>
1918  auto
1919  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1920  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1921  _M_find_before_node(const key_type& __k)
1922  -> __node_base_ptr
1923  {
1924  __node_base_ptr __prev_p = &_M_before_begin;
1925  if (!__prev_p->_M_nxt)
1926  return nullptr;
1927 
1928  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);
1929  __p != nullptr;
1930  __p = __p->_M_next())
1931  {
1932  if (this->_M_key_equals(__k, *__p))
1933  return __prev_p;
1934 
1935  __prev_p = __p;
1936  }
1937 
1938  return nullptr;
1939  }
1940 
1941  // Find the node before the one whose key compares equal to k in the bucket
1942  // bkt. Return nullptr if no node is found.
1943  template<typename _Key, typename _Value, typename _Alloc,
1944  typename _ExtractKey, typename _Equal,
1945  typename _Hash, typename _RangeHash, typename _Unused,
1946  typename _RehashPolicy, typename _Traits>
1947  auto
1948  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1949  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1950  _M_find_before_node(size_type __bkt, const key_type& __k,
1951  __hash_code __code) const
1952  -> __node_base_ptr
1953  {
1954  __node_base_ptr __prev_p = _M_buckets[__bkt];
1955  if (!__prev_p)
1956  return nullptr;
1957 
1958  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1959  __p = __p->_M_next())
1960  {
1961  if (this->_M_equals(__k, __code, *__p))
1962  return __prev_p;
1963 
1964  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1965  break;
1966  __prev_p = __p;
1967  }
1968 
1969  return nullptr;
1970  }
1971 
1972  template<typename _Key, typename _Value, typename _Alloc,
1973  typename _ExtractKey, typename _Equal,
1974  typename _Hash, typename _RangeHash, typename _Unused,
1975  typename _RehashPolicy, typename _Traits>
1976  template<typename _Kt>
1977  auto
1978  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1979  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1980  _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
1981  __hash_code __code) const
1982  -> __node_base_ptr
1983  {
1984  __node_base_ptr __prev_p = _M_buckets[__bkt];
1985  if (!__prev_p)
1986  return nullptr;
1987 
1988  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1989  __p = __p->_M_next())
1990  {
1991  if (this->_M_equals_tr(__k, __code, *__p))
1992  return __prev_p;
1993 
1994  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1995  break;
1996  __prev_p = __p;
1997  }
1998 
1999  return nullptr;
2000  }
2001 
2002  template<typename _Key, typename _Value, typename _Alloc,
2003  typename _ExtractKey, typename _Equal,
2004  typename _Hash, typename _RangeHash, typename _Unused,
2005  typename _RehashPolicy, typename _Traits>
2006  void
2007  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2008  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2009  _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
2010  {
2011  if (_M_buckets[__bkt])
2012  {
2013  // Bucket is not empty, we just need to insert the new node
2014  // after the bucket before begin.
2015  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
2016  _M_buckets[__bkt]->_M_nxt = __node;
2017  }
2018  else
2019  {
2020  // The bucket is empty, the new node is inserted at the
2021  // beginning of the singly-linked list and the bucket will
2022  // contain _M_before_begin pointer.
2023  __node->_M_nxt = _M_before_begin._M_nxt;
2024  _M_before_begin._M_nxt = __node;
2025 
2026  if (__node->_M_nxt)
2027  // We must update former begin bucket that is pointing to
2028  // _M_before_begin.
2029  _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
2030 
2031  _M_buckets[__bkt] = &_M_before_begin;
2032  }
2033  }
2034 
2035  template<typename _Key, typename _Value, typename _Alloc,
2036  typename _ExtractKey, typename _Equal,
2037  typename _Hash, typename _RangeHash, typename _Unused,
2038  typename _RehashPolicy, typename _Traits>
2039  void
2040  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2041  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2042  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
2043  size_type __next_bkt)
2044  {
2045  if (!__next || __next_bkt != __bkt)
2046  {
2047  // Bucket is now empty
2048  // First update next bucket if any
2049  if (__next)
2050  _M_buckets[__next_bkt] = _M_buckets[__bkt];
2051 
2052  // Second update before begin node if necessary
2053  if (&_M_before_begin == _M_buckets[__bkt])
2054  _M_before_begin._M_nxt = __next;
2055  _M_buckets[__bkt] = nullptr;
2056  }
2057  }
2058 
2059  template<typename _Key, typename _Value, typename _Alloc,
2060  typename _ExtractKey, typename _Equal,
2061  typename _Hash, typename _RangeHash, typename _Unused,
2062  typename _RehashPolicy, typename _Traits>
2063  auto
2064  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2065  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2066  _M_get_previous_node(size_type __bkt, __node_ptr __n)
2067  -> __node_base_ptr
2068  {
2069  __node_base_ptr __prev_n = _M_buckets[__bkt];
2070  while (__prev_n->_M_nxt != __n)
2071  __prev_n = __prev_n->_M_nxt;
2072  return __prev_n;
2073  }
2074 
2075  template<typename _Key, typename _Value, typename _Alloc,
2076  typename _ExtractKey, typename _Equal,
2077  typename _Hash, typename _RangeHash, typename _Unused,
2078  typename _RehashPolicy, typename _Traits>
2079  template<typename... _Args>
2080  auto
2081  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2082  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2083  _M_emplace(true_type /* __uks */, _Args&&... __args)
2084  -> pair<iterator, bool>
2085  {
2086  // First build the node to get access to the hash code
2087  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2088  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2089  if (size() <= __small_size_threshold())
2090  {
2091  for (auto __it = begin(); __it != end(); ++__it)
2092  if (this->_M_key_equals(__k, *__it._M_cur))
2093  // There is already an equivalent node, no insertion
2094  return { __it, false };
2095  }
2096 
2097  __hash_code __code = this->_M_hash_code(__k);
2098  size_type __bkt = _M_bucket_index(__code);
2099  if (size() > __small_size_threshold())
2100  if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
2101  // There is already an equivalent node, no insertion
2102  return { iterator(__p), false };
2103 
2104  // Insert the node
2105  auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
2106  __node._M_node = nullptr;
2107  return { __pos, true };
2108  }
2109 
2110  template<typename _Key, typename _Value, typename _Alloc,
2111  typename _ExtractKey, typename _Equal,
2112  typename _Hash, typename _RangeHash, typename _Unused,
2113  typename _RehashPolicy, typename _Traits>
2114  template<typename... _Args>
2115  auto
2116  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2117  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2118  _M_emplace(const_iterator __hint, false_type /* __uks */,
2119  _Args&&... __args)
2120  -> iterator
2121  {
2122  // First build the node to get its hash code.
2123  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2124  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2125 
2126  auto __res = this->_M_compute_hash_code(__hint, __k);
2127  auto __pos
2128  = _M_insert_multi_node(__res.first._M_cur, __res.second,
2129  __node._M_node);
2130  __node._M_node = nullptr;
2131  return __pos;
2132  }
2133 
2134  template<typename _Key, typename _Value, typename _Alloc,
2135  typename _ExtractKey, typename _Equal,
2136  typename _Hash, typename _RangeHash, typename _Unused,
2137  typename _RehashPolicy, typename _Traits>
2138  auto
2139  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2140  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2141  _M_compute_hash_code(const_iterator __hint, const key_type& __k) const
2142  -> pair<const_iterator, __hash_code>
2143  {
2144  if (size() <= __small_size_threshold())
2145  {
2146  if (__hint != cend())
2147  {
2148  for (auto __it = __hint; __it != cend(); ++__it)
2149  if (this->_M_key_equals(__k, *__it._M_cur))
2150  return { __it, this->_M_hash_code(*__it._M_cur) };
2151  }
2152 
2153  for (auto __it = cbegin(); __it != __hint; ++__it)
2154  if (this->_M_key_equals(__k, *__it._M_cur))
2155  return { __it, this->_M_hash_code(*__it._M_cur) };
2156  }
2157 
2158  return { __hint, this->_M_hash_code(__k) };
2159  }
2160 
2161  template<typename _Key, typename _Value, typename _Alloc,
2162  typename _ExtractKey, typename _Equal,
2163  typename _Hash, typename _RangeHash, typename _Unused,
2164  typename _RehashPolicy, typename _Traits>
2165  auto
2166  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2167  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2168  _M_insert_unique_node(size_type __bkt, __hash_code __code,
2169  __node_ptr __node, size_type __n_elt)
2170  -> iterator
2171  {
2172  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2173  std::pair<bool, std::size_t> __do_rehash
2174  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2175  __n_elt);
2176 
2177  if (__do_rehash.first)
2178  {
2179  _M_rehash(__do_rehash.second, __saved_state);
2180  __bkt = _M_bucket_index(__code);
2181  }
2182 
2183  this->_M_store_code(*__node, __code);
2184 
2185  // Always insert at the beginning of the bucket.
2186  _M_insert_bucket_begin(__bkt, __node);
2187  ++_M_element_count;
2188  return iterator(__node);
2189  }
2190 
2191  template<typename _Key, typename _Value, typename _Alloc,
2192  typename _ExtractKey, typename _Equal,
2193  typename _Hash, typename _RangeHash, typename _Unused,
2194  typename _RehashPolicy, typename _Traits>
2195  auto
2196  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2197  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2198  _M_insert_multi_node(__node_ptr __hint,
2199  __hash_code __code, __node_ptr __node)
2200  -> iterator
2201  {
2202  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2203  std::pair<bool, std::size_t> __do_rehash
2204  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2205 
2206  if (__do_rehash.first)
2207  _M_rehash(__do_rehash.second, __saved_state);
2208 
2209  this->_M_store_code(*__node, __code);
2210  const key_type& __k = _ExtractKey{}(__node->_M_v());
2211  size_type __bkt = _M_bucket_index(__code);
2212 
2213  // Find the node before an equivalent one or use hint if it exists and
2214  // if it is equivalent.
2215  __node_base_ptr __prev
2216  = __builtin_expect(__hint != nullptr, false)
2217  && this->_M_equals(__k, __code, *__hint)
2218  ? __hint
2219  : _M_find_before_node(__bkt, __k, __code);
2220 
2221  if (__prev)
2222  {
2223  // Insert after the node before the equivalent one.
2224  __node->_M_nxt = __prev->_M_nxt;
2225  __prev->_M_nxt = __node;
2226  if (__builtin_expect(__prev == __hint, false))
2227  // hint might be the last bucket node, in this case we need to
2228  // update next bucket.
2229  if (__node->_M_nxt
2230  && !this->_M_equals(__k, __code, *__node->_M_next()))
2231  {
2232  size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2233  if (__next_bkt != __bkt)
2234  _M_buckets[__next_bkt] = __node;
2235  }
2236  }
2237  else
2238  // The inserted node has no equivalent in the hashtable. We must
2239  // insert the new node at the beginning of the bucket to preserve
2240  // equivalent elements' relative positions.
2241  _M_insert_bucket_begin(__bkt, __node);
2242  ++_M_element_count;
2243  return iterator(__node);
2244  }
2245 
2246  // Insert v if no element with its key is already present.
2247  template<typename _Key, typename _Value, typename _Alloc,
2248  typename _ExtractKey, typename _Equal,
2249  typename _Hash, typename _RangeHash, typename _Unused,
2250  typename _RehashPolicy, typename _Traits>
2251  template<typename _Kt, typename _Arg, typename _NodeGenerator>
2252  auto
2253  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2254  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2255  _M_insert_unique(_Kt&& __k, _Arg&& __v,
2256  const _NodeGenerator& __node_gen)
2257  -> pair<iterator, bool>
2258  {
2259  if (size() <= __small_size_threshold())
2260  for (auto __it = begin(); __it != end(); ++__it)
2261  if (this->_M_key_equals_tr(__k, *__it._M_cur))
2262  return { __it, false };
2263 
2264  __hash_code __code = this->_M_hash_code_tr(__k);
2265  size_type __bkt = _M_bucket_index(__code);
2266 
2267  if (size() > __small_size_threshold())
2268  if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
2269  return { iterator(__node), false };
2270 
2271  _Scoped_node __node {
2272  __node_builder_t::_S_build(std::forward<_Kt>(__k),
2273  std::forward<_Arg>(__v),
2274  __node_gen),
2275  this
2276  };
2277  auto __pos
2278  = _M_insert_unique_node(__bkt, __code, __node._M_node);
2279  __node._M_node = nullptr;
2280  return { __pos, true };
2281  }
2282 
2283  // Insert v unconditionally.
2284  template<typename _Key, typename _Value, typename _Alloc,
2285  typename _ExtractKey, typename _Equal,
2286  typename _Hash, typename _RangeHash, typename _Unused,
2287  typename _RehashPolicy, typename _Traits>
2288  template<typename _Arg, typename _NodeGenerator>
2289  auto
2290  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2291  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2292  _M_insert(const_iterator __hint, _Arg&& __v,
2293  const _NodeGenerator& __node_gen,
2294  false_type /* __uks */)
2295  -> iterator
2296  {
2297  // First allocate new node so that we don't do anything if it throws.
2298  _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2299 
2300  // Second compute the hash code so that we don't rehash if it throws.
2301  auto __res = this->_M_compute_hash_code(
2302  __hint, _ExtractKey{}(__node._M_node->_M_v()));
2303 
2304  auto __pos
2305  = _M_insert_multi_node(__res.first._M_cur, __res.second,
2306  __node._M_node);
2307  __node._M_node = nullptr;
2308  return __pos;
2309  }
2310 
2311  template<typename _Key, typename _Value, typename _Alloc,
2312  typename _ExtractKey, typename _Equal,
2313  typename _Hash, typename _RangeHash, typename _Unused,
2314  typename _RehashPolicy, typename _Traits>
2315  auto
2316  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2317  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2318  erase(const_iterator __it)
2319  -> iterator
2320  {
2321  __node_ptr __n = __it._M_cur;
2322  std::size_t __bkt = _M_bucket_index(*__n);
2323 
2324  // Look for previous node to unlink it from the erased one, this
2325  // is why we need buckets to contain the before begin to make
2326  // this search fast.
2327  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2328  return _M_erase(__bkt, __prev_n, __n);
2329  }
2330 
2331  template<typename _Key, typename _Value, typename _Alloc,
2332  typename _ExtractKey, typename _Equal,
2333  typename _Hash, typename _RangeHash, typename _Unused,
2334  typename _RehashPolicy, typename _Traits>
2335  auto
2336  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2337  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2338  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2339  -> iterator
2340  {
2341  if (__prev_n == _M_buckets[__bkt])
2342  _M_remove_bucket_begin(__bkt, __n->_M_next(),
2343  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2344  else if (__n->_M_nxt)
2345  {
2346  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2347  if (__next_bkt != __bkt)
2348  _M_buckets[__next_bkt] = __prev_n;
2349  }
2350 
2351  __prev_n->_M_nxt = __n->_M_nxt;
2352  iterator __result(__n->_M_next());
2353  this->_M_deallocate_node(__n);
2354  --_M_element_count;
2355 
2356  return __result;
2357  }
2358 
2359  template<typename _Key, typename _Value, typename _Alloc,
2360  typename _ExtractKey, typename _Equal,
2361  typename _Hash, typename _RangeHash, typename _Unused,
2362  typename _RehashPolicy, typename _Traits>
2363  auto
2364  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2365  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2366  _M_erase(true_type /* __uks */, const key_type& __k)
2367  -> size_type
2368  {
2369  __node_base_ptr __prev_n;
2370  __node_ptr __n;
2371  std::size_t __bkt;
2372  if (size() <= __small_size_threshold())
2373  {
2374  __prev_n = _M_find_before_node(__k);
2375  if (!__prev_n)
2376  return 0;
2377 
2378  // We found a matching node, erase it.
2379  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2380  __bkt = _M_bucket_index(*__n);
2381  }
2382  else
2383  {
2384  __hash_code __code = this->_M_hash_code(__k);
2385  __bkt = _M_bucket_index(__code);
2386 
2387  // Look for the node before the first matching node.
2388  __prev_n = _M_find_before_node(__bkt, __k, __code);
2389  if (!__prev_n)
2390  return 0;
2391 
2392  // We found a matching node, erase it.
2393  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2394  }
2395 
2396  _M_erase(__bkt, __prev_n, __n);
2397  return 1;
2398  }
2399 
2400  template<typename _Key, typename _Value, typename _Alloc,
2401  typename _ExtractKey, typename _Equal,
2402  typename _Hash, typename _RangeHash, typename _Unused,
2403  typename _RehashPolicy, typename _Traits>
2404  auto
2405  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2406  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2407  _M_erase(false_type /* __uks */, const key_type& __k)
2408  -> size_type
2409  {
2410  std::size_t __bkt;
2411  __node_base_ptr __prev_n;
2412  __node_ptr __n;
2413  if (size() <= __small_size_threshold())
2414  {
2415  __prev_n = _M_find_before_node(__k);
2416  if (!__prev_n)
2417  return 0;
2418 
2419  // We found a matching node, erase it.
2420  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2421  __bkt = _M_bucket_index(*__n);
2422  }
2423  else
2424  {
2425  __hash_code __code = this->_M_hash_code(__k);
2426  __bkt = _M_bucket_index(__code);
2427 
2428  // Look for the node before the first matching node.
2429  __prev_n = _M_find_before_node(__bkt, __k, __code);
2430  if (!__prev_n)
2431  return 0;
2432 
2433  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2434  }
2435 
2436  // _GLIBCXX_RESOLVE_LIB_DEFECTS
2437  // 526. Is it undefined if a function in the standard changes
2438  // in parameters?
2439  // We use one loop to find all matching nodes and another to deallocate
2440  // them so that the key stays valid during the first loop. It might be
2441  // invalidated indirectly when destroying nodes.
2442  __node_ptr __n_last = __n->_M_next();
2443  while (__n_last && this->_M_node_equals(*__n, *__n_last))
2444  __n_last = __n_last->_M_next();
2445 
2446  std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2447 
2448  // Deallocate nodes.
2449  size_type __result = 0;
2450  do
2451  {
2452  __node_ptr __p = __n->_M_next();
2453  this->_M_deallocate_node(__n);
2454  __n = __p;
2455  ++__result;
2456  }
2457  while (__n != __n_last);
2458 
2459  _M_element_count -= __result;
2460  if (__prev_n == _M_buckets[__bkt])
2461  _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2462  else if (__n_last_bkt != __bkt)
2463  _M_buckets[__n_last_bkt] = __prev_n;
2464  __prev_n->_M_nxt = __n_last;
2465  return __result;
2466  }
2467 
2468  template<typename _Key, typename _Value, typename _Alloc,
2469  typename _ExtractKey, typename _Equal,
2470  typename _Hash, typename _RangeHash, typename _Unused,
2471  typename _RehashPolicy, typename _Traits>
2472  auto
2473  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2474  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2475  erase(const_iterator __first, const_iterator __last)
2476  -> iterator
2477  {
2478  __node_ptr __n = __first._M_cur;
2479  __node_ptr __last_n = __last._M_cur;
2480  if (__n == __last_n)
2481  return iterator(__n);
2482 
2483  std::size_t __bkt = _M_bucket_index(*__n);
2484 
2485  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2486  bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2487  std::size_t __n_bkt = __bkt;
2488  for (;;)
2489  {
2490  do
2491  {
2492  __node_ptr __tmp = __n;
2493  __n = __n->_M_next();
2494  this->_M_deallocate_node(__tmp);
2495  --_M_element_count;
2496  if (!__n)
2497  break;
2498  __n_bkt = _M_bucket_index(*__n);
2499  }
2500  while (__n != __last_n && __n_bkt == __bkt);
2501  if (__is_bucket_begin)
2502  _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2503  if (__n == __last_n)
2504  break;
2505  __is_bucket_begin = true;
2506  __bkt = __n_bkt;
2507  }
2508 
2509  if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2510  _M_buckets[__n_bkt] = __prev_n;
2511  __prev_n->_M_nxt = __n;
2512  return iterator(__n);
2513  }
2514 
2515  template<typename _Key, typename _Value, typename _Alloc,
2516  typename _ExtractKey, typename _Equal,
2517  typename _Hash, typename _RangeHash, typename _Unused,
2518  typename _RehashPolicy, typename _Traits>
2519  void
2520  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2521  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2522  clear() noexcept
2523  {
2524  this->_M_deallocate_nodes(_M_begin());
2525  __builtin_memset(_M_buckets, 0,
2526  _M_bucket_count * sizeof(__node_base_ptr));
2527  _M_element_count = 0;
2528  _M_before_begin._M_nxt = nullptr;
2529  }
2530 
2531  template<typename _Key, typename _Value, typename _Alloc,
2532  typename _ExtractKey, typename _Equal,
2533  typename _Hash, typename _RangeHash, typename _Unused,
2534  typename _RehashPolicy, typename _Traits>
2535  void
2536  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2537  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2538  rehash(size_type __bkt_count)
2539  {
2540  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2541  __bkt_count
2542  = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2543  __bkt_count);
2544  __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2545 
2546  if (__bkt_count != _M_bucket_count)
2547  _M_rehash(__bkt_count, __saved_state);
2548  else
2549  // No rehash, restore previous state to keep it consistent with
2550  // container state.
2551  _M_rehash_policy._M_reset(__saved_state);
2552  }
2553 
2554  template<typename _Key, typename _Value, typename _Alloc,
2555  typename _ExtractKey, typename _Equal,
2556  typename _Hash, typename _RangeHash, typename _Unused,
2557  typename _RehashPolicy, typename _Traits>
2558  void
2559  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2560  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2561  _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2562  {
2563  __try
2564  {
2565  _M_rehash_aux(__bkt_count, __unique_keys{});
2566  }
2567  __catch(...)
2568  {
2569  // A failure here means that buckets allocation failed. We only
2570  // have to restore hash policy previous state.
2571  _M_rehash_policy._M_reset(__state);
2572  __throw_exception_again;
2573  }
2574  }
2575 
2576  // Rehash when there is no equivalent elements.
2577  template<typename _Key, typename _Value, typename _Alloc,
2578  typename _ExtractKey, typename _Equal,
2579  typename _Hash, typename _RangeHash, typename _Unused,
2580  typename _RehashPolicy, typename _Traits>
2581  void
2582  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2583  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2584  _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2585  {
2586  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2587  __node_ptr __p = _M_begin();
2588  _M_before_begin._M_nxt = nullptr;
2589  std::size_t __bbegin_bkt = 0;
2590  while (__p)
2591  {
2592  __node_ptr __next = __p->_M_next();
2593  std::size_t __bkt
2594  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2595  if (!__new_buckets[__bkt])
2596  {
2597  __p->_M_nxt = _M_before_begin._M_nxt;
2598  _M_before_begin._M_nxt = __p;
2599  __new_buckets[__bkt] = &_M_before_begin;
2600  if (__p->_M_nxt)
2601  __new_buckets[__bbegin_bkt] = __p;
2602  __bbegin_bkt = __bkt;
2603  }
2604  else
2605  {
2606  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2607  __new_buckets[__bkt]->_M_nxt = __p;
2608  }
2609 
2610  __p = __next;
2611  }
2612 
2613  _M_deallocate_buckets();
2614  _M_bucket_count = __bkt_count;
2615  _M_buckets = __new_buckets;
2616  }
2617 
2618  // Rehash when there can be equivalent elements, preserve their relative
2619  // order.
2620  template<typename _Key, typename _Value, typename _Alloc,
2621  typename _ExtractKey, typename _Equal,
2622  typename _Hash, typename _RangeHash, typename _Unused,
2623  typename _RehashPolicy, typename _Traits>
2624  void
2625  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2626  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2627  _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2628  {
2629  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2630  __node_ptr __p = _M_begin();
2631  _M_before_begin._M_nxt = nullptr;
2632  std::size_t __bbegin_bkt = 0;
2633  std::size_t __prev_bkt = 0;
2634  __node_ptr __prev_p = nullptr;
2635  bool __check_bucket = false;
2636 
2637  while (__p)
2638  {
2639  __node_ptr __next = __p->_M_next();
2640  std::size_t __bkt
2641  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2642 
2643  if (__prev_p && __prev_bkt == __bkt)
2644  {
2645  // Previous insert was already in this bucket, we insert after
2646  // the previously inserted one to preserve equivalent elements
2647  // relative order.
2648  __p->_M_nxt = __prev_p->_M_nxt;
2649  __prev_p->_M_nxt = __p;
2650 
2651  // Inserting after a node in a bucket require to check that we
2652  // haven't change the bucket last node, in this case next
2653  // bucket containing its before begin node must be updated. We
2654  // schedule a check as soon as we move out of the sequence of
2655  // equivalent nodes to limit the number of checks.
2656  __check_bucket = true;
2657  }
2658  else
2659  {
2660  if (__check_bucket)
2661  {
2662  // Check if we shall update the next bucket because of
2663  // insertions into __prev_bkt bucket.
2664  if (__prev_p->_M_nxt)
2665  {
2666  std::size_t __next_bkt
2667  = __hash_code_base::_M_bucket_index(
2668  *__prev_p->_M_next(), __bkt_count);
2669  if (__next_bkt != __prev_bkt)
2670  __new_buckets[__next_bkt] = __prev_p;
2671  }
2672  __check_bucket = false;
2673  }
2674 
2675  if (!__new_buckets[__bkt])
2676  {
2677  __p->_M_nxt = _M_before_begin._M_nxt;
2678  _M_before_begin._M_nxt = __p;
2679  __new_buckets[__bkt] = &_M_before_begin;
2680  if (__p->_M_nxt)
2681  __new_buckets[__bbegin_bkt] = __p;
2682  __bbegin_bkt = __bkt;
2683  }
2684  else
2685  {
2686  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2687  __new_buckets[__bkt]->_M_nxt = __p;
2688  }
2689  }
2690  __prev_p = __p;
2691  __prev_bkt = __bkt;
2692  __p = __next;
2693  }
2694 
2695  if (__check_bucket && __prev_p->_M_nxt)
2696  {
2697  std::size_t __next_bkt
2698  = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2699  __bkt_count);
2700  if (__next_bkt != __prev_bkt)
2701  __new_buckets[__next_bkt] = __prev_p;
2702  }
2703 
2704  _M_deallocate_buckets();
2705  _M_bucket_count = __bkt_count;
2706  _M_buckets = __new_buckets;
2707  }
2708 
2709 #if __cplusplus > 201402L
2710  template<typename, typename, typename> class _Hash_merge_helper { };
2711 #endif // C++17
2712 
2713 #if __cpp_deduction_guides >= 201606
2714  // Used to constrain deduction guides
2715  template<typename _Hash>
2716  using _RequireNotAllocatorOrIntegral
2717  = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2718 #endif
2719 
2720 /// @endcond
2721 _GLIBCXX_END_NAMESPACE_VERSION
2722 } // namespace std
2723 
2724 #endif // _HASHTABLE_H
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
_T1 first
The first member.
Definition: stl_pair.h:193
_Tp * begin(valarray< _Tp > &__va) noexcept
Return an iterator pointing to the first element of the valarray.
Definition: valarray:1227
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:51
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition: move.h:70
_Tp * end(valarray< _Tp > &__va) noexcept
Return an iterator pointing to one past the last element of the valarray.
Definition: valarray:1249
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
Definition: stl_algobase.h:257
Struct holding two objects of arbitrary type.
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
Definition: range_access.h:126
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
Definition: range_access.h:284
_T2 second
The second member.
Definition: stl_pair.h:194
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
Definition: range_access.h:264
integral_constant< bool, true > true_type
The type used as a compile-time boolean with true value.
Definition: type_traits:82
ISO C++ entities toplevel namespace is std.
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
Definition: range_access.h:138
integral_constant< bool, false > false_type
The type used as a compile-time boolean with false value.
Definition: type_traits:85
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition: move.h:97