1// hashtable.h header -*- C++ -*-
2
3// Copyright (C) 2007-2021 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30#ifndef _HASHTABLE_H
31#define _HASHTABLE_H 1
32
33#pragma GCC system_header
34
35#include <bits/hashtable_policy.h>
36#include <bits/enable_special_members.h>
37#if __cplusplus > 201402L
38# include <bits/node_handle.h>
39#endif
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 template<typename _Tp, typename _Hash>
46 using __cache_default
47 = __not_<__and_<// Do not cache for fast hasher.
48 __is_fast_hash<_Hash>,
49 // Mandatory to have erase not throwing.
50 __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
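
  // An illustrative sketch of the effect (not part of the interface):
  // std::hash<int> is __is_fast_hash and noexcept invocable, so hash codes
  // are not cached by default, whereas a user-provided hasher whose
  // operator() is not noexcept gets its hash codes cached:
  //
  //   static_assert(!__cache_default<int, std::hash<int>>::value, "");
  //
  //   struct _UserHash { std::size_t operator()(const int&) const; };
  //   static_assert(__cache_default<int, _UserHash>::value, "");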
51
52 // Helper to conditionally delete the default constructor.
53 // The _Hash_node_base type is used to distinguish this specialization
54 // from any other potentially-overlapping subobjects of the hashtable.
55 template<typename _Equal, typename _Hash, typename _Allocator>
56 using _Hashtable_enable_default_ctor
57 = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
58 is_default_constructible<_Hash>,
59 is_default_constructible<_Allocator>>{},
60 __detail::_Hash_node_base>;
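
  // For example (illustrative only), a hasher without a default constructor,
  // such as a hypothetical
  //
  //   struct _SeededHash
  //   {
  //     explicit _SeededHash(std::size_t __seed);
  //     std::size_t operator()(int) const noexcept;
  //   };
  //
  // makes the condition above false, so the hashtable's default constructor
  // (and therefore the container's) is deleted.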
61
62 /**
63 * Primary class template _Hashtable.
64 *
65 * @ingroup hashtable-detail
66 *
67 * @tparam _Value CopyConstructible type.
68 *
69 * @tparam _Key CopyConstructible type.
70 *
71 * @tparam _Alloc An allocator type
72 * ([lib.allocator.requirements]) whose _Alloc::value_type is
73 * _Value. As a conforming extension, we allow for
74 * _Alloc::value_type != _Value.
75 *
76 * @tparam _ExtractKey Function object that takes an object of type
77 * _Value and returns a value of type _Key.
78 *
   *  @tparam _Equal  Function object that takes two objects of type _Key
   *  and returns a bool-like value that is true if the two objects
   *  are considered equal.
82 *
   *  @tparam _Hash  The hash function. A unary function object with
   *  argument type _Key and result type size_t. Return values should
   *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
86 *
87 * @tparam _RangeHash The range-hashing function (in the terminology of
88 * Tavori and Dreizin). A binary function object whose argument
89 * types and result type are all size_t. Given arguments r and N,
90 * the return value is in the range [0, N).
91 *
92 * @tparam _Unused Not used.
93 *
94 * @tparam _RehashPolicy Policy class with three members, all of
95 * which govern the bucket count. _M_next_bkt(n) returns a bucket
96 * count no smaller than n. _M_bkt_for_elements(n) returns a
97 * bucket count appropriate for an element count of n.
98 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
99 * current bucket count is n_bkt and the current element count is
100 * n_elt, we need to increase the bucket count for n_ins insertions.
   *  If so, returns make_pair(true, n), where n is the new bucket count. If
   *  not, returns make_pair(false, <anything>).
103 *
104 * @tparam _Traits Compile-time class with three boolean
105 * std::integral_constant members: __cache_hash_code, __constant_iterators,
106 * __unique_keys.
107 *
108 * Each _Hashtable data structure has:
109 *
110 * - _Bucket[] _M_buckets
111 * - _Hash_node_base _M_before_begin
112 * - size_type _M_bucket_count
113 * - size_type _M_element_count
114 *
115 * with _Bucket being _Hash_node_base* and _Hash_node containing:
116 *
117 * - _Hash_node* _M_next
118 * - Tp _M_value
119 * - size_t _M_hash_code if cache_hash_code is true
120 *
121 * In terms of Standard containers the hashtable is like the aggregation of:
122 *
123 * - std::forward_list<_Node> containing the elements
124 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
125 *
126 * The non-empty buckets contain the node before the first node in the
127 * bucket. This design makes it possible to implement something like a
128 * std::forward_list::insert_after on container insertion and
129 * std::forward_list::erase_after on container erase
130 * calls. _M_before_begin is equivalent to
   *  std::forward_list::before_begin. Empty buckets contain
   *  nullptr. Note that one of the non-empty buckets contains
   *  &_M_before_begin, which is not a dereferenceable node, so the
   *  node pointer stored in a bucket must never be dereferenced; only
   *  its next node may be.
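   *
   *  An illustrative sketch (not normative): after inserting two elements
   *  that both hash to bucket 1 of a 3-bucket table, the layout is
   *
   *    _M_before_begin --> node_a --> node_b --> null
   *    _M_buckets[0] == nullptr
   *    _M_buckets[1] == &_M_before_begin  // the node *before* the bucket's first
   *    _M_buckets[2] == nullptr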
136 *
137 * Walking through a bucket's nodes requires a check on the hash code to
138 * see if each node is still in the bucket. Such a design assumes a
139 * quite efficient hash functor and is one of the reasons it is
140 * highly advisable to set __cache_hash_code to true.
141 *
142 * The container iterators are simply built from nodes. This way
143 * incrementing the iterator is perfectly efficient independent of
144 * how many empty buckets there are in the container.
145 *
146 * On insert we compute the element's hash code and use it to find the
147 * bucket index. If the element must be inserted in an empty bucket
148 * we add it at the beginning of the singly linked list and make the
149 * bucket point to _M_before_begin. The bucket that used to point to
150 * _M_before_begin, if any, is updated to point to its new before
151 * begin node.
152 *
153 * On erase, the simple iterator design requires using the hash
154 * functor to get the index of the bucket to update. For this
155 * reason, when __cache_hash_code is set to false the hash functor must
156 * not throw and this is enforced by a static assertion.
157 *
158 * Functionality is implemented by decomposition into base classes,
159 * where the derived _Hashtable class is used in _Map_base,
160 * _Insert, _Rehash_base, and _Equality base classes to access the
161 * "this" pointer. _Hashtable_base is used in the base classes as a
162 * non-recursive, fully-completed-type so that detailed nested type
163 * information, such as iterator type and node type, can be
164 * used. This is similar to the "Curiously Recurring Template
165 * Pattern" (CRTP) technique, but uses a reconstructed, not
166 * explicitly passed, template pattern.
167 *
168 * Base class templates are:
169 * - __detail::_Hashtable_base
170 * - __detail::_Map_base
171 * - __detail::_Insert
172 * - __detail::_Rehash_base
173 * - __detail::_Equality
174 */
175 template<typename _Key, typename _Value, typename _Alloc,
176 typename _ExtractKey, typename _Equal,
177 typename _Hash, typename _RangeHash, typename _Unused,
178 typename _RehashPolicy, typename _Traits>
179 class _Hashtable
180 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
181 _Hash, _RangeHash, _Unused, _Traits>,
182 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
183 _Hash, _RangeHash, _Unused,
184 _RehashPolicy, _Traits>,
185 public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
186 _Hash, _RangeHash, _Unused,
187 _RehashPolicy, _Traits>,
188 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
189 _Hash, _RangeHash, _Unused,
190 _RehashPolicy, _Traits>,
191 public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
192 _Hash, _RangeHash, _Unused,
193 _RehashPolicy, _Traits>,
194 private __detail::_Hashtable_alloc<
195 __alloc_rebind<_Alloc,
196 __detail::_Hash_node<_Value,
197 _Traits::__hash_cached::value>>>,
198 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
199 {
200 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
201 "unordered container must have a non-const, non-volatile value_type");
202#if __cplusplus > 201703L || defined __STRICT_ANSI__
203 static_assert(is_same<typename _Alloc::value_type, _Value>{},
204 "unordered container must have the same value_type as its allocator");
205#endif
206
207 using __traits_type = _Traits;
208 using __hash_cached = typename __traits_type::__hash_cached;
209 using __constant_iterators = typename __traits_type::__constant_iterators;
210 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
211 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
212
213 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
214
215 using __node_value_type =
216 __detail::_Hash_node_value<_Value, __hash_cached::value>;
217 using __node_ptr = typename __hashtable_alloc::__node_ptr;
218 using __value_alloc_traits =
219 typename __hashtable_alloc::__value_alloc_traits;
220 using __node_alloc_traits =
221 typename __hashtable_alloc::__node_alloc_traits;
222 using __node_base = typename __hashtable_alloc::__node_base;
223 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
224 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
225
226 using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
227 _Equal, _Hash,
228 _RangeHash, _Unused,
229 _RehashPolicy, _Traits>;
230 using __enable_default_ctor
231 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
232
233 public:
234 typedef _Key key_type;
235 typedef _Value value_type;
236 typedef _Alloc allocator_type;
237 typedef _Equal key_equal;
238
239 // mapped_type, if present, comes from _Map_base.
240 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
241 typedef typename __value_alloc_traits::pointer pointer;
242 typedef typename __value_alloc_traits::const_pointer const_pointer;
243 typedef value_type& reference;
244 typedef const value_type& const_reference;
245
246 using iterator = typename __insert_base::iterator;
247
248 using const_iterator = typename __insert_base::const_iterator;
249
250 using local_iterator = __detail::_Local_iterator<key_type, _Value,
251 _ExtractKey, _Hash, _RangeHash, _Unused,
252 __constant_iterators::value,
253 __hash_cached::value>;
254
255 using const_local_iterator = __detail::_Local_const_iterator<
256 key_type, _Value,
257 _ExtractKey, _Hash, _RangeHash, _Unused,
258 __constant_iterators::value, __hash_cached::value>;
259
260 private:
261 using __rehash_type = _RehashPolicy;
262 using __rehash_state = typename __rehash_type::_State;
263
264 using __unique_keys = typename __traits_type::__unique_keys;
265
266 using __hashtable_base = __detail::
267 _Hashtable_base<_Key, _Value, _ExtractKey,
268 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
269
270 using __hash_code_base = typename __hashtable_base::__hash_code_base;
271 using __hash_code = typename __hashtable_base::__hash_code;
272 using __ireturn_type = typename __insert_base::__ireturn_type;
273
274 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
275 _Equal, _Hash, _RangeHash, _Unused,
276 _RehashPolicy, _Traits>;
277
278 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
279 _ExtractKey, _Equal,
280 _Hash, _RangeHash, _Unused,
281 _RehashPolicy, _Traits>;
282
283 using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
284 _Equal, _Hash, _RangeHash, _Unused,
285 _RehashPolicy, _Traits>;
286
287 using __reuse_or_alloc_node_gen_t =
288 __detail::_ReuseOrAllocNode<__node_alloc_type>;
289 using __alloc_node_gen_t =
290 __detail::_AllocNode<__node_alloc_type>;
291
292 // Simple RAII type for managing a node containing an element
293 struct _Scoped_node
294 {
295 // Take ownership of a node with a constructed element.
296 _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
297 : _M_h(__h), _M_node(__n) { }
298
299 // Allocate a node and construct an element within it.
300 template<typename... _Args>
301 _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
302 : _M_h(__h),
303 _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
304 { }
305
306 // Destroy element and deallocate node.
        ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); }
308
309 _Scoped_node(const _Scoped_node&) = delete;
310 _Scoped_node& operator=(const _Scoped_node&) = delete;
311
312 __hashtable_alloc* _M_h;
313 __node_ptr _M_node;
314 };
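
      // Typical usage elsewhere in this file (illustrative sketch): the node
      // is released only once it has been linked into the table, so any
      // exception thrown in between deallocates it.
      //
      //   _Scoped_node __node { this, std::forward<_Args>(__args)... };
      //   // ... compute hash code and bucket (may throw) ...
      //   auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
      //   __node._M_node = nullptr;  // ownership now belongs to the table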
315
316 template<typename _Ht>
317 static constexpr
318 typename conditional<std::is_lvalue_reference<_Ht>::value,
319 const value_type&, value_type&&>::type
320 __fwd_value_for(value_type& __val) noexcept
321 { return std::move(__val); }
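
      // N.B. when _Ht is an lvalue reference (copy assignment/construction)
      // __fwd_value_for yields a const value_type& and the value is copied;
      // otherwise it is moved.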
322
323 // Compile-time diagnostics.
324
325 // _Hash_code_base has everything protected, so use this derived type to
326 // access it.
327 struct __hash_code_base_access : __hash_code_base
328 { using __hash_code_base::_M_bucket_index; };
329
330 // Getting a bucket index from a node shall not throw because it is used
331 // in methods (erase, swap...) that shall not throw.
332 static_assert(noexcept(declval<const __hash_code_base_access&>()
333 ._M_bucket_index(declval<const __node_value_type&>(),
334 (std::size_t)0)),
335 "Cache the hash code or qualify your functors involved"
336 " in hash code and bucket index computation with noexcept");
337
338 // To get bucket index we need _RangeHash not to throw.
339 static_assert(is_nothrow_default_constructible<_RangeHash>::value,
340 "Functor used to map hash code to bucket index"
341 " must be nothrow default constructible");
342 static_assert(noexcept(
343 std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
344 "Functor used to map hash code to bucket index must be"
345 " noexcept");
346
      // To compute the bucket index we also need _ExtractKey not to throw.
348 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
349 "_ExtractKey must be nothrow default constructible");
350 static_assert(noexcept(
351 std::declval<const _ExtractKey&>()(std::declval<_Value>())),
352 "_ExtractKey functor must be noexcept invocable");
353
354 template<typename _Keya, typename _Valuea, typename _Alloca,
355 typename _ExtractKeya, typename _Equala,
356 typename _Hasha, typename _RangeHasha, typename _Unuseda,
357 typename _RehashPolicya, typename _Traitsa,
358 bool _Unique_keysa>
359 friend struct __detail::_Map_base;
360
361 template<typename _Keya, typename _Valuea, typename _Alloca,
362 typename _ExtractKeya, typename _Equala,
363 typename _Hasha, typename _RangeHasha, typename _Unuseda,
364 typename _RehashPolicya, typename _Traitsa>
365 friend struct __detail::_Insert_base;
366
367 template<typename _Keya, typename _Valuea, typename _Alloca,
368 typename _ExtractKeya, typename _Equala,
369 typename _Hasha, typename _RangeHasha, typename _Unuseda,
370 typename _RehashPolicya, typename _Traitsa,
371 bool _Constant_iteratorsa>
372 friend struct __detail::_Insert;
373
374 template<typename _Keya, typename _Valuea, typename _Alloca,
375 typename _ExtractKeya, typename _Equala,
376 typename _Hasha, typename _RangeHasha, typename _Unuseda,
377 typename _RehashPolicya, typename _Traitsa,
378 bool _Unique_keysa>
379 friend struct __detail::_Equality;
380
381 public:
382 using size_type = typename __hashtable_base::size_type;
383 using difference_type = typename __hashtable_base::difference_type;
384
385#if __cplusplus > 201402L
386 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
387 using insert_return_type = _Node_insert_return<iterator, node_type>;
388#endif
389
390 private:
391 __buckets_ptr _M_buckets = &_M_single_bucket;
392 size_type _M_bucket_count = 1;
393 __node_base _M_before_begin;
394 size_type _M_element_count = 0;
395 _RehashPolicy _M_rehash_policy;
396
      // A single bucket used when only one bucket is needed. Especially
      // useful in move semantics: it lets a moved-from hashtable keep a
      // single bucket that is not heap-allocated, so that those operations
      // can be noexcept qualified.
      // Note that we can't leave the hashtable with zero buckets without
      // adding numerous checks in the code to avoid a zero modulus.
403 __node_base_ptr _M_single_bucket = nullptr;
404
405 void
406 _M_update_bbegin()
407 {
408 if (_M_begin())
409 _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
410 }
411
412 void
413 _M_update_bbegin(__node_ptr __n)
414 {
415 _M_before_begin._M_nxt = __n;
416 _M_update_bbegin();
417 }
418
419 bool
420 _M_uses_single_bucket(__buckets_ptr __bkts) const
421 { return __builtin_expect(__bkts == &_M_single_bucket, false); }
422
423 bool
424 _M_uses_single_bucket() const
425 { return _M_uses_single_bucket(_M_buckets); }
426
427 __hashtable_alloc&
428 _M_base_alloc() { return *this; }
429
430 __buckets_ptr
431 _M_allocate_buckets(size_type __bkt_count)
432 {
433 if (__builtin_expect(__bkt_count == 1, false))
434 {
435 _M_single_bucket = nullptr;
436 return &_M_single_bucket;
437 }
438
439 return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
440 }
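
      // (A requested count of 1 reuses the embedded _M_single_bucket member,
      // so no allocation takes place; see the comment above _M_single_bucket.)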
441
442 void
443 _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
444 {
445 if (_M_uses_single_bucket(__bkts))
446 return;
447
448 __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
449 }
450
451 void
452 _M_deallocate_buckets()
453 { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
454
      // Get a bucket's first node; deals with the fact that non-empty
      // buckets store the node preceding their first node.
457 __node_ptr
458 _M_bucket_begin(size_type __bkt) const;
459
460 __node_ptr
461 _M_begin() const
462 { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
463
464 // Assign *this using another _Hashtable instance. Whether elements
465 // are copied or moved depends on the _Ht reference.
466 template<typename _Ht>
467 void
468 _M_assign_elements(_Ht&&);
469
470 template<typename _Ht, typename _NodeGenerator>
471 void
472 _M_assign(_Ht&&, const _NodeGenerator&);
473
474 void
475 _M_move_assign(_Hashtable&&, true_type);
476
477 void
478 _M_move_assign(_Hashtable&&, false_type);
479
480 void
481 _M_reset() noexcept;
482
483 _Hashtable(const _Hash& __h, const _Equal& __eq,
484 const allocator_type& __a)
485 : __hashtable_base(__h, __eq),
486 __hashtable_alloc(__node_alloc_type(__a)),
487 __enable_default_ctor(_Enable_default_constructor_tag{})
488 { }
489
490 template<bool _No_realloc = true>
491 static constexpr bool
492 _S_nothrow_move()
493 {
494#if __cplusplus <= 201402L
495 return __and_<__bool_constant<_No_realloc>,
496 is_nothrow_copy_constructible<_Hash>,
497 is_nothrow_copy_constructible<_Equal>>::value;
498#else
499 if constexpr (_No_realloc)
500 if constexpr (is_nothrow_copy_constructible<_Hash>())
501 return is_nothrow_copy_constructible<_Equal>();
502 return false;
503#endif
504 }
505
506 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
507 true_type /* alloc always equal */)
508 noexcept(_S_nothrow_move());
509
510 _Hashtable(_Hashtable&&, __node_alloc_type&&,
511 false_type /* alloc always equal */);
512
513 template<typename _InputIterator>
514 _Hashtable(_InputIterator __first, _InputIterator __last,
515 size_type __bkt_count_hint,
516 const _Hash&, const _Equal&, const allocator_type&,
517 true_type __uks);
518
519 template<typename _InputIterator>
520 _Hashtable(_InputIterator __first, _InputIterator __last,
521 size_type __bkt_count_hint,
522 const _Hash&, const _Equal&, const allocator_type&,
523 false_type __uks);
524
525 public:
526 // Constructor, destructor, assignment, swap
527 _Hashtable() = default;
528
529 _Hashtable(const _Hashtable&);
530
531 _Hashtable(const _Hashtable&, const allocator_type&);
532
533 explicit
534 _Hashtable(size_type __bkt_count_hint,
535 const _Hash& __hf = _Hash(),
536 const key_equal& __eql = key_equal(),
537 const allocator_type& __a = allocator_type());
538
539 // Use delegating constructors.
540 _Hashtable(_Hashtable&& __ht)
541 noexcept(_S_nothrow_move())
542 : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
543 true_type{})
544 { }
545
546 _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
547 noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
548 : _Hashtable(std::move(__ht), __node_alloc_type(__a),
549 typename __node_alloc_traits::is_always_equal{})
550 { }
551
552 explicit
553 _Hashtable(const allocator_type& __a)
554 : __hashtable_alloc(__node_alloc_type(__a)),
555 __enable_default_ctor(_Enable_default_constructor_tag{})
556 { }
557
558 template<typename _InputIterator>
559 _Hashtable(_InputIterator __f, _InputIterator __l,
560 size_type __bkt_count_hint = 0,
561 const _Hash& __hf = _Hash(),
562 const key_equal& __eql = key_equal(),
563 const allocator_type& __a = allocator_type())
564 : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
565 __unique_keys{})
566 { }
567
568 _Hashtable(initializer_list<value_type> __l,
569 size_type __bkt_count_hint = 0,
570 const _Hash& __hf = _Hash(),
571 const key_equal& __eql = key_equal(),
572 const allocator_type& __a = allocator_type())
573 : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
574 __hf, __eql, __a, __unique_keys{})
575 { }
576
577 _Hashtable&
578 operator=(const _Hashtable& __ht);
579
580 _Hashtable&
581 operator=(_Hashtable&& __ht)
582 noexcept(__node_alloc_traits::_S_nothrow_move()
583 && is_nothrow_move_assignable<_Hash>::value
584 && is_nothrow_move_assignable<_Equal>::value)
585 {
586 constexpr bool __move_storage =
587 __node_alloc_traits::_S_propagate_on_move_assign()
588 || __node_alloc_traits::_S_always_equal();
589 _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
590 return *this;
591 }
592
593 _Hashtable&
594 operator=(initializer_list<value_type> __l)
595 {
596 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
597 _M_before_begin._M_nxt = nullptr;
598 clear();
599
600 // We consider that all elements of __l are going to be inserted.
601 auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
602
603 // Do not shrink to keep potential user reservation.
604 if (_M_bucket_count < __l_bkt_count)
605 rehash(__l_bkt_count);
606
607 this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
608 return *this;
609 }
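
      // Illustrative user-level effect (sketch): assigning a braced list to,
      // e.g., an unordered_map reuses already-allocated nodes where possible
      // instead of deallocating and reallocating them:
      //
      //   std::unordered_map<int, std::string> m;
      //   m = { {1, "one"}, {2, "two"} };  // existing nodes recycled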
610
611 ~_Hashtable() noexcept;
612
613 void
614 swap(_Hashtable&)
615 noexcept(__and_<__is_nothrow_swappable<_Hash>,
616 __is_nothrow_swappable<_Equal>>::value);
617
618 // Basic container operations
619 iterator
620 begin() noexcept
621 { return iterator(_M_begin()); }
622
623 const_iterator
624 begin() const noexcept
625 { return const_iterator(_M_begin()); }
626
627 iterator
628 end() noexcept
629 { return iterator(nullptr); }
630
631 const_iterator
632 end() const noexcept
633 { return const_iterator(nullptr); }
634
635 const_iterator
636 cbegin() const noexcept
637 { return const_iterator(_M_begin()); }
638
639 const_iterator
640 cend() const noexcept
641 { return const_iterator(nullptr); }
642
643 size_type
644 size() const noexcept
645 { return _M_element_count; }
646
647 _GLIBCXX_NODISCARD bool
648 empty() const noexcept
649 { return size() == 0; }
650
651 allocator_type
652 get_allocator() const noexcept
653 { return allocator_type(this->_M_node_allocator()); }
654
655 size_type
656 max_size() const noexcept
657 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
658
659 // Observers
660 key_equal
661 key_eq() const
662 { return this->_M_eq(); }
663
664 // hash_function, if present, comes from _Hash_code_base.
665
666 // Bucket operations
667 size_type
668 bucket_count() const noexcept
669 { return _M_bucket_count; }
670
671 size_type
672 max_bucket_count() const noexcept
673 { return max_size(); }
674
675 size_type
676 bucket_size(size_type __bkt) const
677 { return std::distance(begin(__bkt), end(__bkt)); }
678
679 size_type
680 bucket(const key_type& __k) const
681 { return _M_bucket_index(this->_M_hash_code(__k)); }
682
683 local_iterator
684 begin(size_type __bkt)
685 {
686 return local_iterator(*this, _M_bucket_begin(__bkt),
687 __bkt, _M_bucket_count);
688 }
689
690 local_iterator
691 end(size_type __bkt)
692 { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
693
694 const_local_iterator
695 begin(size_type __bkt) const
696 {
697 return const_local_iterator(*this, _M_bucket_begin(__bkt),
698 __bkt, _M_bucket_count);
699 }
700
701 const_local_iterator
702 end(size_type __bkt) const
703 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
704
705 // DR 691.
706 const_local_iterator
707 cbegin(size_type __bkt) const
708 {
709 return const_local_iterator(*this, _M_bucket_begin(__bkt),
710 __bkt, _M_bucket_count);
711 }
712
713 const_local_iterator
714 cend(size_type __bkt) const
715 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
716
717 float
718 load_factor() const noexcept
719 {
720 return static_cast<float>(size()) / static_cast<float>(bucket_count());
721 }
722
723 // max_load_factor, if present, comes from _Rehash_base.
724
725 // Generalization of max_load_factor. Extension, not found in
726 // TR1. Only useful if _RehashPolicy is something other than
727 // the default.
728 const _RehashPolicy&
729 __rehash_policy() const
730 { return _M_rehash_policy; }
731
732 void
733 __rehash_policy(const _RehashPolicy& __pol)
734 { _M_rehash_policy = __pol; }
735
736 // Lookup.
737 iterator
738 find(const key_type& __k);
739
740 const_iterator
741 find(const key_type& __k) const;
742
743 size_type
744 count(const key_type& __k) const;
745
746 std::pair<iterator, iterator>
747 equal_range(const key_type& __k);
748
749 std::pair<const_iterator, const_iterator>
750 equal_range(const key_type& __k) const;
751
752#if __cplusplus >= 202002L
753#define __cpp_lib_generic_unordered_lookup 201811L
754
755 template<typename _Kt,
756 typename = __has_is_transparent_t<_Hash, _Kt>,
757 typename = __has_is_transparent_t<_Equal, _Kt>>
758 iterator
759 _M_find_tr(const _Kt& __k);
760
761 template<typename _Kt,
762 typename = __has_is_transparent_t<_Hash, _Kt>,
763 typename = __has_is_transparent_t<_Equal, _Kt>>
764 const_iterator
765 _M_find_tr(const _Kt& __k) const;
766
767 template<typename _Kt,
768 typename = __has_is_transparent_t<_Hash, _Kt>,
769 typename = __has_is_transparent_t<_Equal, _Kt>>
770 size_type
771 _M_count_tr(const _Kt& __k) const;
772
773 template<typename _Kt,
774 typename = __has_is_transparent_t<_Hash, _Kt>,
775 typename = __has_is_transparent_t<_Equal, _Kt>>
776 pair<iterator, iterator>
777 _M_equal_range_tr(const _Kt& __k);
778
779 template<typename _Kt,
780 typename = __has_is_transparent_t<_Hash, _Kt>,
781 typename = __has_is_transparent_t<_Equal, _Kt>>
782 pair<const_iterator, const_iterator>
783 _M_equal_range_tr(const _Kt& __k) const;
784#endif // C++20
785
786 private:
787 // Bucket index computation helpers.
788 size_type
789 _M_bucket_index(const __node_value_type& __n) const noexcept
790 { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
791
792 size_type
793 _M_bucket_index(__hash_code __c) const
794 { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
795
796 // Find and insert helper functions and types
797 // Find the node before the one matching the criteria.
798 __node_base_ptr
799 _M_find_before_node(size_type, const key_type&, __hash_code) const;
800
801 template<typename _Kt>
802 __node_base_ptr
803 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
804
805 __node_ptr
806 _M_find_node(size_type __bkt, const key_type& __key,
807 __hash_code __c) const
808 {
809 __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
810 if (__before_n)
811 return static_cast<__node_ptr>(__before_n->_M_nxt);
812 return nullptr;
813 }
814
815 template<typename _Kt>
816 __node_ptr
817 _M_find_node_tr(size_type __bkt, const _Kt& __key,
818 __hash_code __c) const
819 {
820 auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
821 if (__before_n)
822 return static_cast<__node_ptr>(__before_n->_M_nxt);
823 return nullptr;
824 }
825
826 // Insert a node at the beginning of a bucket.
827 void
828 _M_insert_bucket_begin(size_type, __node_ptr);
829
      // Remove the bucket's first node.
831 void
832 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
833 size_type __next_bkt);
834
835 // Get the node before __n in the bucket __bkt
836 __node_base_ptr
837 _M_get_previous_node(size_type __bkt, __node_ptr __n);
838
839 // Insert node __n with hash code __code, in bucket __bkt if no
840 // rehash (assumes no element with same key already present).
841 // Takes ownership of __n if insertion succeeds, throws otherwise.
842 iterator
843 _M_insert_unique_node(size_type __bkt, __hash_code,
844 __node_ptr __n, size_type __n_elt = 1);
845
846 // Insert node __n with key __k and hash code __code.
847 // Takes ownership of __n if insertion succeeds, throws otherwise.
848 iterator
849 _M_insert_multi_node(__node_ptr __hint,
850 __hash_code __code, __node_ptr __n);
851
852 template<typename... _Args>
853 std::pair<iterator, bool>
854 _M_emplace(true_type __uks, _Args&&... __args);
855
856 template<typename... _Args>
857 iterator
858 _M_emplace(false_type __uks, _Args&&... __args)
859 { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
860
861 // Emplace with hint, useless when keys are unique.
862 template<typename... _Args>
863 iterator
864 _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
865 { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
866
867 template<typename... _Args>
868 iterator
869 _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
870
871 template<typename _Arg, typename _NodeGenerator>
872 std::pair<iterator, bool>
873 _M_insert(_Arg&&, const _NodeGenerator&, true_type __uks);
874
875 template<typename _Arg, typename _NodeGenerator>
876 iterator
877 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
878 false_type __uks)
879 {
880 return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
881 __uks);
882 }
883
884 // Insert with hint, not used when keys are unique.
885 template<typename _Arg, typename _NodeGenerator>
886 iterator
887 _M_insert(const_iterator, _Arg&& __arg,
888 const _NodeGenerator& __node_gen, true_type __uks)
889 {
890 return
891 _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
892 }
893
894 // Insert with hint when keys are not unique.
895 template<typename _Arg, typename _NodeGenerator>
896 iterator
897 _M_insert(const_iterator, _Arg&&,
898 const _NodeGenerator&, false_type __uks);
899
900 size_type
901 _M_erase(true_type __uks, const key_type&);
902
903 size_type
904 _M_erase(false_type __uks, const key_type&);
905
906 iterator
907 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
908
909 public:
910 // Emplace
911 template<typename... _Args>
912 __ireturn_type
913 emplace(_Args&&... __args)
914 { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
915
916 template<typename... _Args>
917 iterator
918 emplace_hint(const_iterator __hint, _Args&&... __args)
919 {
920 return _M_emplace(__hint, __unique_keys{},
921 std::forward<_Args>(__args)...);
922 }
923
924 // Insert member functions via inheritance.
925
926 // Erase
927 iterator
928 erase(const_iterator);
929
930 // LWG 2059.
931 iterator
932 erase(iterator __it)
933 { return erase(const_iterator(__it)); }
934
935 size_type
936 erase(const key_type& __k)
937 { return _M_erase(__unique_keys{}, __k); }
938
939 iterator
940 erase(const_iterator, const_iterator);
941
942 void
943 clear() noexcept;
944
      // Set the number of buckets, keeping it appropriate for the
      // container's number of elements.
947 void rehash(size_type __bkt_count);
948
949 // DR 1189.
950 // reserve, if present, comes from _Rehash_base.
951
952#if __cplusplus > 201402L
953 /// Re-insert an extracted node into a container with unique keys.
954 insert_return_type
955 _M_reinsert_node(node_type&& __nh)
956 {
957 insert_return_type __ret;
958 if (__nh.empty())
959 __ret.position = end();
960 else
961 {
962 __glibcxx_assert(get_allocator() == __nh.get_allocator());
963
964 const key_type& __k = __nh._M_key();
965 __hash_code __code = this->_M_hash_code(__k);
966 size_type __bkt = _M_bucket_index(__code);
967 if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
968 {
969 __ret.node = std::move(__nh);
970 __ret.position = iterator(__n);
971 __ret.inserted = false;
972 }
973 else
974 {
975 __ret.position
976 = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
977 __nh._M_ptr = nullptr;
978 __ret.inserted = true;
979 }
980 }
981 return __ret;
982 }
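
      // A user-level sketch (illustrative only) of the node-handle round
      // trip this member implements for containers with unique keys:
      //
      //   std::unordered_map<int, std::string> a, b;
      //   a.emplace(1, "one");
      //   auto nh = a.extract(1);               // node_type
      //   auto res = b.insert(std::move(nh));   // insert_return_type
      //   // res.inserted == true, res.position points at the element,
      //   // res.node is empty.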
983
984 /// Re-insert an extracted node into a container with equivalent keys.
985 iterator
986 _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
987 {
988 if (__nh.empty())
989 return end();
990
991 __glibcxx_assert(get_allocator() == __nh.get_allocator());
992
993 const key_type& __k = __nh._M_key();
994 auto __code = this->_M_hash_code(__k);
995 auto __ret
996 = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
997 __nh._M_ptr = nullptr;
998 return __ret;
999 }
1000
1001 private:
1002 node_type
1003 _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1004 {
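        // __prev_n precedes the node being extracted. If it is the bucket's
        // "before" node, the extracted node is the bucket's first and
        // _M_remove_bucket_begin updates the bucket array; otherwise, if the
        // extracted node was the last of its bucket, the following bucket
        // must now point to __prev_n.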
1005 __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1006 if (__prev_n == _M_buckets[__bkt])
1007 _M_remove_bucket_begin(__bkt, __n->_M_next(),
1008 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1009 else if (__n->_M_nxt)
1010 {
1011 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1012 if (__next_bkt != __bkt)
1013 _M_buckets[__next_bkt] = __prev_n;
1014 }
1015
1016 __prev_n->_M_nxt = __n->_M_nxt;
1017 __n->_M_nxt = nullptr;
1018 --_M_element_count;
1019 return { __n, this->_M_node_allocator() };
1020 }
1021
1022 public:
1023 // Extract a node.
1024 node_type
1025 extract(const_iterator __pos)
1026 {
1027 size_t __bkt = _M_bucket_index(*__pos._M_cur);
1028 return _M_extract_node(__bkt,
1029 _M_get_previous_node(__bkt, __pos._M_cur));
1030 }
1031
1032 /// Extract a node.
1033 node_type
1034 extract(const _Key& __k)
1035 {
1036 node_type __nh;
1037 __hash_code __code = this->_M_hash_code(__k);
1038 std::size_t __bkt = _M_bucket_index(__code);
1039 if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1040 __nh = _M_extract_node(__bkt, __prev_node);
1041 return __nh;
1042 }
1043
1044 /// Merge from a compatible container into one with unique keys.
1045 template<typename _Compatible_Hashtable>
1046 void
1047 _M_merge_unique(_Compatible_Hashtable& __src) noexcept
1048 {
1049 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1050 node_type>, "Node types are compatible");
1051 __glibcxx_assert(get_allocator() == __src.get_allocator());
1052
1053 auto __n_elt = __src.size();
1054 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1055 {
1056 auto __pos = __i++;
1057 const key_type& __k = _ExtractKey{}(*__pos);
1058 __hash_code __code = this->_M_hash_code(__k);
1059 size_type __bkt = _M_bucket_index(__code);
1060 if (_M_find_node(__bkt, __k, __code) == nullptr)
1061 {
1062 auto __nh = __src.extract(__pos);
1063 _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1064 __nh._M_ptr = nullptr;
1065 __n_elt = 1;
1066 }
1067 else if (__n_elt != 1)
1068 --__n_elt;
1069 }
1070 }
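
      // Illustrative user-level effect: `a.merge(b)` on unique-key containers
      // relinks into `a` every node of `b` whose key is not already present,
      // leaving the remaining elements in `b`; no elements are copied.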
1071
1072 /// Merge from a compatible container into one with equivalent keys.
1073 template<typename _Compatible_Hashtable>
1074 void
1075 _M_merge_multi(_Compatible_Hashtable& __src) noexcept
1076 {
1077 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1078 node_type>, "Node types are compatible");
1079 __glibcxx_assert(get_allocator() == __src.get_allocator());
1080
1081 this->reserve(size() + __src.size());
1082 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1083 _M_reinsert_node_multi(cend(), __src.extract(__i++));
1084 }
1085#endif // C++17
1086
1087 private:
1088 // Helper rehash method used when keys are unique.
1089 void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1090
1091 // Helper rehash method used when keys can be non-unique.
1092 void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1093
1094 // Unconditionally change size of bucket array to n, restore
1095 // hash policy state to __state on exception.
1096 void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1097 };
1098
1099
1100 // Definitions of class template _Hashtable's out-of-line member functions.
1101 template<typename _Key, typename _Value, typename _Alloc,
1102 typename _ExtractKey, typename _Equal,
1103 typename _Hash, typename _RangeHash, typename _Unused,
1104 typename _RehashPolicy, typename _Traits>
1105 auto
1106 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1107 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1108 _M_bucket_begin(size_type __bkt) const
1109 -> __node_ptr
1110 {
1111 __node_base_ptr __n = _M_buckets[__bkt];
1112 return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1113 }
1114
1115 template<typename _Key, typename _Value, typename _Alloc,
1116 typename _ExtractKey, typename _Equal,
1117 typename _Hash, typename _RangeHash, typename _Unused,
1118 typename _RehashPolicy, typename _Traits>
1119 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1120 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1121 _Hashtable(size_type __bkt_count_hint,
1122 const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1123 : _Hashtable(__h, __eq, __a)
1124 {
1125 auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1126 if (__bkt_count > _M_bucket_count)
1127 {
1128 _M_buckets = _M_allocate_buckets(__bkt_count);
1129 _M_bucket_count = __bkt_count;
1130 }
1131 }
1132
1133 template<typename _Key, typename _Value, typename _Alloc,
1134 typename _ExtractKey, typename _Equal,
1135 typename _Hash, typename _RangeHash, typename _Unused,
1136 typename _RehashPolicy, typename _Traits>
1137 template<typename _InputIterator>
1138 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1139 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1140 _Hashtable(_InputIterator __f, _InputIterator __l,
1141 size_type __bkt_count_hint,
1142 const _Hash& __h, const _Equal& __eq,
1143 const allocator_type& __a, true_type /* __uks */)
1144 : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1145 {
1146 for (; __f != __l; ++__f)
1147 this->insert(*__f);
1148 }
1149
1150 template<typename _Key, typename _Value, typename _Alloc,
1151 typename _ExtractKey, typename _Equal,
1152 typename _Hash, typename _RangeHash, typename _Unused,
1153 typename _RehashPolicy, typename _Traits>
1154 template<typename _InputIterator>
1155 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1156 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1157 _Hashtable(_InputIterator __f, _InputIterator __l,
1158 size_type __bkt_count_hint,
1159 const _Hash& __h, const _Equal& __eq,
1160 const allocator_type& __a, false_type /* __uks */)
1161 : _Hashtable(__h, __eq, __a)
1162 {
1163 auto __nb_elems = __detail::__distance_fw(__f, __l);
1164 auto __bkt_count =
1165 _M_rehash_policy._M_next_bkt(
1166 std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1167 __bkt_count_hint));
1168
1169 if (__bkt_count > _M_bucket_count)
1170 {
1171 _M_buckets = _M_allocate_buckets(__bkt_count);
1172 _M_bucket_count = __bkt_count;
1173 }
1174
1175 for (; __f != __l; ++__f)
1176 this->insert(*__f);
1177 }
1178
1179 template<typename _Key, typename _Value, typename _Alloc,
1180 typename _ExtractKey, typename _Equal,
1181 typename _Hash, typename _RangeHash, typename _Unused,
1182 typename _RehashPolicy, typename _Traits>
1183 auto
1184 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1185 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1186 operator=(const _Hashtable& __ht)
1187 -> _Hashtable&
1188 {
1189 if (&__ht == this)
1190 return *this;
1191
1192 if (__node_alloc_traits::_S_propagate_on_copy_assign())
1193 {
1194 auto& __this_alloc = this->_M_node_allocator();
1195 auto& __that_alloc = __ht._M_node_allocator();
1196 if (!__node_alloc_traits::_S_always_equal()
1197 && __this_alloc != __that_alloc)
1198 {
1199 // Replacement allocator cannot free existing storage.
1200 this->_M_deallocate_nodes(_M_begin());
1201 _M_before_begin._M_nxt = nullptr;
1202 _M_deallocate_buckets();
1203 _M_buckets = nullptr;
1204 std::__alloc_on_copy(__this_alloc, __that_alloc);
1205 __hashtable_base::operator=(__ht);
1206 _M_bucket_count = __ht._M_bucket_count;
1207 _M_element_count = __ht._M_element_count;
1208 _M_rehash_policy = __ht._M_rehash_policy;
1209 __alloc_node_gen_t __alloc_node_gen(*this);
1210 __try
1211 {
1212 _M_assign(__ht, __alloc_node_gen);
1213 }
1214 __catch(...)
1215 {
1216 // _M_assign took care of deallocating all memory. Now we
1217 // must make sure this instance remains in a usable state.
1218 _M_reset();
1219 __throw_exception_again;
1220 }
1221 return *this;
1222 }
1223 std::__alloc_on_copy(__this_alloc, __that_alloc);
1224 }
1225
1226 // Reuse allocated buckets and nodes.
1227 _M_assign_elements(__ht);
1228 return *this;
1229 }
1230
1231 template<typename _Key, typename _Value, typename _Alloc,
1232 typename _ExtractKey, typename _Equal,
1233 typename _Hash, typename _RangeHash, typename _Unused,
1234 typename _RehashPolicy, typename _Traits>
1235 template<typename _Ht>
1236 void
1237 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1238 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1239 _M_assign_elements(_Ht&& __ht)
1240 {
1241 __buckets_ptr __former_buckets = nullptr;
1242 std::size_t __former_bucket_count = _M_bucket_count;
1243 const __rehash_state& __former_state = _M_rehash_policy._M_state();
1244
1245 if (_M_bucket_count != __ht._M_bucket_count)
1246 {
1247 __former_buckets = _M_buckets;
1248 _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1249 _M_bucket_count = __ht._M_bucket_count;
1250 }
1251 else
1252 __builtin_memset(_M_buckets, 0,
1253 _M_bucket_count * sizeof(__node_base_ptr));
1254
1255 __try
1256 {
1257 __hashtable_base::operator=(std::forward<_Ht>(__ht));
1258 _M_element_count = __ht._M_element_count;
1259 _M_rehash_policy = __ht._M_rehash_policy;
1260 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1261 _M_before_begin._M_nxt = nullptr;
1262 _M_assign(std::forward<_Ht>(__ht), __roan);
1263 if (__former_buckets)
1264 _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1265 }
1266 __catch(...)
1267 {
1268 if (__former_buckets)
1269 {
1270 // Restore previous buckets.
1271 _M_deallocate_buckets();
1272 _M_rehash_policy._M_reset(__former_state);
1273 _M_buckets = __former_buckets;
1274 _M_bucket_count = __former_bucket_count;
1275 }
1276 __builtin_memset(_M_buckets, 0,
1277 _M_bucket_count * sizeof(__node_base_ptr));
1278 __throw_exception_again;
1279 }
1280 }
1281
1282 template<typename _Key, typename _Value, typename _Alloc,
1283 typename _ExtractKey, typename _Equal,
1284 typename _Hash, typename _RangeHash, typename _Unused,
1285 typename _RehashPolicy, typename _Traits>
1286 template<typename _Ht, typename _NodeGenerator>
1287 void
1288 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1289 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1290 _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1291 {
1292 __buckets_ptr __buckets = nullptr;
1293 if (!_M_buckets)
1294 _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1295
1296 __try
1297 {
1298 if (!__ht._M_before_begin._M_nxt)
1299 return;
1300
1301 // First deal with the special first node pointed to by
1302 // _M_before_begin.
1303 __node_ptr __ht_n = __ht._M_begin();
1304 __node_ptr __this_n
1305 = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1306 this->_M_copy_code(*__this_n, *__ht_n);
1307 _M_update_bbegin(__this_n);
1308
1309 // Then deal with other nodes.
1310 __node_ptr __prev_n = __this_n;
1311 for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1312 {
1313 __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1314 __prev_n->_M_nxt = __this_n;
1315 this->_M_copy_code(*__this_n, *__ht_n);
1316 size_type __bkt = _M_bucket_index(*__this_n);
1317 if (!_M_buckets[__bkt])
1318 _M_buckets[__bkt] = __prev_n;
1319 __prev_n = __this_n;
1320 }
1321 }
1322 __catch(...)
1323 {
1324 clear();
1325 if (__buckets)
1326 _M_deallocate_buckets();
1327 __throw_exception_again;
1328 }
1329 }
1330
1331 template<typename _Key, typename _Value, typename _Alloc,
1332 typename _ExtractKey, typename _Equal,
1333 typename _Hash, typename _RangeHash, typename _Unused,
1334 typename _RehashPolicy, typename _Traits>
1335 void
1336 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1337 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1338 _M_reset() noexcept
1339 {
1340 _M_rehash_policy._M_reset();
1341 _M_bucket_count = 1;
1342 _M_single_bucket = nullptr;
1343 _M_buckets = &_M_single_bucket;
1344 _M_before_begin._M_nxt = nullptr;
1345 _M_element_count = 0;
1346 }
1347
1348 template<typename _Key, typename _Value, typename _Alloc,
1349 typename _ExtractKey, typename _Equal,
1350 typename _Hash, typename _RangeHash, typename _Unused,
1351 typename _RehashPolicy, typename _Traits>
1352 void
1353 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1354 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1355 _M_move_assign(_Hashtable&& __ht, true_type)
1356 {
1357 if (__builtin_expect(std::__addressof(__ht) == this, false))
1358 return;
1359
1360 this->_M_deallocate_nodes(_M_begin());
1361 _M_deallocate_buckets();
1362 __hashtable_base::operator=(std::move(__ht));
1363 _M_rehash_policy = __ht._M_rehash_policy;
1364 if (!__ht._M_uses_single_bucket())
1365 _M_buckets = __ht._M_buckets;
1366 else
1367 {
1368 _M_buckets = &_M_single_bucket;
1369 _M_single_bucket = __ht._M_single_bucket;
1370 }
1371
1372 _M_bucket_count = __ht._M_bucket_count;
1373 _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1374 _M_element_count = __ht._M_element_count;
1375 std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1376
1377 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1378 _M_update_bbegin();
1379 __ht._M_reset();
1380 }
1381
1382 template<typename _Key, typename _Value, typename _Alloc,
1383 typename _ExtractKey, typename _Equal,
1384 typename _Hash, typename _RangeHash, typename _Unused,
1385 typename _RehashPolicy, typename _Traits>
1386 void
1387 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1388 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1389 _M_move_assign(_Hashtable&& __ht, false_type)
1390 {
1391 if (__ht._M_node_allocator() == this->_M_node_allocator())
1392 _M_move_assign(std::move(__ht), true_type{});
1393 else
1394 {
          // Can't move memory, so move the elements instead.
1396 _M_assign_elements(std::move(__ht));
1397 __ht.clear();
1398 }
1399 }
1400
1401 template<typename _Key, typename _Value, typename _Alloc,
1402 typename _ExtractKey, typename _Equal,
1403 typename _Hash, typename _RangeHash, typename _Unused,
1404 typename _RehashPolicy, typename _Traits>
1405 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1406 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1407 _Hashtable(const _Hashtable& __ht)
1408 : __hashtable_base(__ht),
1409 __map_base(__ht),
1410 __rehash_base(__ht),
1411 __hashtable_alloc(
1412 __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1413 __enable_default_ctor(__ht),
1414 _M_buckets(nullptr),
1415 _M_bucket_count(__ht._M_bucket_count),
1416 _M_element_count(__ht._M_element_count),
1417 _M_rehash_policy(__ht._M_rehash_policy)
1418 {
1419 __alloc_node_gen_t __alloc_node_gen(*this);
1420 _M_assign(__ht, __alloc_node_gen);
1421 }
1422
1423 template<typename _Key, typename _Value, typename _Alloc,
1424 typename _ExtractKey, typename _Equal,
1425 typename _Hash, typename _RangeHash, typename _Unused,
1426 typename _RehashPolicy, typename _Traits>
1427 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1428 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1429 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1430 true_type /* alloc always equal */)
1431 noexcept(_S_nothrow_move())
1432 : __hashtable_base(__ht),
1433 __map_base(__ht),
1434 __rehash_base(__ht),
1435 __hashtable_alloc(std::move(__a)),
1436 __enable_default_ctor(__ht),
1437 _M_buckets(__ht._M_buckets),
1438 _M_bucket_count(__ht._M_bucket_count),
1439 _M_before_begin(__ht._M_before_begin._M_nxt),
1440 _M_element_count(__ht._M_element_count),
1441 _M_rehash_policy(__ht._M_rehash_policy)
1442 {
1443 // Update buckets if __ht is using its single bucket.
1444 if (__ht._M_uses_single_bucket())
1445 {
1446 _M_buckets = &_M_single_bucket;
1447 _M_single_bucket = __ht._M_single_bucket;
1448 }
1449
1450 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1451 _M_update_bbegin();
1452
1453 __ht._M_reset();
1454 }
1455
1456 template<typename _Key, typename _Value, typename _Alloc,
1457 typename _ExtractKey, typename _Equal,
1458 typename _Hash, typename _RangeHash, typename _Unused,
1459 typename _RehashPolicy, typename _Traits>
1460 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1461 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1462 _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1463 : __hashtable_base(__ht),
1464 __map_base(__ht),
1465 __rehash_base(__ht),
1466 __hashtable_alloc(__node_alloc_type(__a)),
1467 __enable_default_ctor(__ht),
1468 _M_buckets(),
1469 _M_bucket_count(__ht._M_bucket_count),
1470 _M_element_count(__ht._M_element_count),
1471 _M_rehash_policy(__ht._M_rehash_policy)
1472 {
1473 __alloc_node_gen_t __alloc_node_gen(*this);
1474 _M_assign(__ht, __alloc_node_gen);
1475 }
1476
1477 template<typename _Key, typename _Value, typename _Alloc,
1478 typename _ExtractKey, typename _Equal,
1479 typename _Hash, typename _RangeHash, typename _Unused,
1480 typename _RehashPolicy, typename _Traits>
1481 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1482 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1483 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1484 false_type /* alloc always equal */)
1485 : __hashtable_base(__ht),
1486 __map_base(__ht),
1487 __rehash_base(__ht),
1488 __hashtable_alloc(std::move(__a)),
1489 __enable_default_ctor(__ht),
1490 _M_buckets(nullptr),
1491 _M_bucket_count(__ht._M_bucket_count),
1492 _M_element_count(__ht._M_element_count),
1493 _M_rehash_policy(__ht._M_rehash_policy)
1494 {
1495 if (__ht._M_node_allocator() == this->_M_node_allocator())
1496 {
1497 if (__ht._M_uses_single_bucket())
1498 {
1499 _M_buckets = &_M_single_bucket;
1500 _M_single_bucket = __ht._M_single_bucket;
1501 }
1502 else
1503 _M_buckets = __ht._M_buckets;
1504
1505 // Fix bucket containing the _M_before_begin pointer that can't be
1506 // moved.
1507 _M_update_bbegin(__ht._M_begin());
1508
1509 __ht._M_reset();
1510 }
1511 else
1512 {
1513 __alloc_node_gen_t __alloc_gen(*this);
1514
1515 using _Fwd_Ht = typename
1516 conditional<__move_if_noexcept_cond<value_type>::value,
1517 const _Hashtable&, _Hashtable&&>::type;
1518 _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1519 __ht.clear();
1520 }
1521 }
1522
1523 template<typename _Key, typename _Value, typename _Alloc,
1524 typename _ExtractKey, typename _Equal,
1525 typename _Hash, typename _RangeHash, typename _Unused,
1526 typename _RehashPolicy, typename _Traits>
1527 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1528 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1529 ~_Hashtable() noexcept
1530 {
1531 clear();
1532 _M_deallocate_buckets();
1533 }
1534
1535 template<typename _Key, typename _Value, typename _Alloc,
1536 typename _ExtractKey, typename _Equal,
1537 typename _Hash, typename _RangeHash, typename _Unused,
1538 typename _RehashPolicy, typename _Traits>
1539 void
1540 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1541 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1542 swap(_Hashtable& __x)
1543 noexcept(__and_<__is_nothrow_swappable<_Hash>,
1544 __is_nothrow_swappable<_Equal>>::value)
1545 {
1546 // The only base class with member variables is hash_code_base.
1547 // We define _Hash_code_base::_M_swap because different
1548 // specializations have different members.
1549 this->_M_swap(__x);
1550
1551 std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1552 std::swap(_M_rehash_policy, __x._M_rehash_policy);
1553
1554 // Deal properly with potentially moved instances.
1555 if (this->_M_uses_single_bucket())
1556 {
1557 if (!__x._M_uses_single_bucket())
1558 {
1559 _M_buckets = __x._M_buckets;
1560 __x._M_buckets = &__x._M_single_bucket;
1561 }
1562 }
1563 else if (__x._M_uses_single_bucket())
1564 {
1565 __x._M_buckets = _M_buckets;
1566 _M_buckets = &_M_single_bucket;
1567 }
1568 else
1569 std::swap(_M_buckets, __x._M_buckets);
1570
1571 std::swap(_M_bucket_count, __x._M_bucket_count);
1572 std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1573 std::swap(_M_element_count, __x._M_element_count);
1574 std::swap(_M_single_bucket, __x._M_single_bucket);
1575
1576 // Fix buckets containing the _M_before_begin pointers that can't be
1577 // swapped.
1578 _M_update_bbegin();
1579 __x._M_update_bbegin();
1580 }
1581
1582 template<typename _Key, typename _Value, typename _Alloc,
1583 typename _ExtractKey, typename _Equal,
1584 typename _Hash, typename _RangeHash, typename _Unused,
1585 typename _RehashPolicy, typename _Traits>
1586 auto
1587 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1588 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1589 find(const key_type& __k)
1590 -> iterator
1591 {
1592 __hash_code __code = this->_M_hash_code(__k);
1593 std::size_t __bkt = _M_bucket_index(__code);
1594 return iterator(_M_find_node(__bkt, __k, __code));
1595 }
1596
1597 template<typename _Key, typename _Value, typename _Alloc,
1598 typename _ExtractKey, typename _Equal,
1599 typename _Hash, typename _RangeHash, typename _Unused,
1600 typename _RehashPolicy, typename _Traits>
1601 auto
1602 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1603 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1604 find(const key_type& __k) const
1605 -> const_iterator
1606 {
1607 __hash_code __code = this->_M_hash_code(__k);
1608 std::size_t __bkt = _M_bucket_index(__code);
1609 return const_iterator(_M_find_node(__bkt, __k, __code));
1610 }
1611
1612#if __cplusplus > 201703L
1613 template<typename _Key, typename _Value, typename _Alloc,
1614 typename _ExtractKey, typename _Equal,
1615 typename _Hash, typename _RangeHash, typename _Unused,
1616 typename _RehashPolicy, typename _Traits>
1617 template<typename _Kt, typename, typename>
1618 auto
1619 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1620 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1621 _M_find_tr(const _Kt& __k)
1622 -> iterator
1623 {
1624 __hash_code __code = this->_M_hash_code_tr(__k);
1625 std::size_t __bkt = _M_bucket_index(__code);
1626 return iterator(_M_find_node_tr(__bkt, __k, __code));
1627 }
1628
1629 template<typename _Key, typename _Value, typename _Alloc,
1630 typename _ExtractKey, typename _Equal,
1631 typename _Hash, typename _RangeHash, typename _Unused,
1632 typename _RehashPolicy, typename _Traits>
1633 template<typename _Kt, typename, typename>
1634 auto
1635 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1636 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1637 _M_find_tr(const _Kt& __k) const
1638 -> const_iterator
1639 {
1640 __hash_code __code = this->_M_hash_code_tr(__k);
1641 std::size_t __bkt = _M_bucket_index(__code);
1642 return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1643 }
1644#endif
1645
1646 template<typename _Key, typename _Value, typename _Alloc,
1647 typename _ExtractKey, typename _Equal,
1648 typename _Hash, typename _RangeHash, typename _Unused,
1649 typename _RehashPolicy, typename _Traits>
1650 auto
1651 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1652 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1653 count(const key_type& __k) const
1654 -> size_type
1655 {
1656 auto __it = find(__k);
1657 if (!__it._M_cur)
1658 return 0;
1659
1660 if (__unique_keys::value)
1661 return 1;
1662
      // All equivalent values are next to each other; if we find a
      // non-equivalent value after an equivalent one, we won't find
      // any more equivalent values.
1666 size_type __result = 1;
1667 for (auto __ref = __it++;
1668 __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1669 ++__it)
1670 ++__result;
1671
1672 return __result;
1673 }
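
  // Illustrative sketch (not part of the implementation): for a
  // non-unique-keys table, count() relies on the invariant that equivalent
  // elements are stored contiguously. For example, assuming
  //
  //   std::unordered_multiset<int> __s{ 1, 2, 2, 2, 3 };
  //
  // __s.count(2) finds the first 2, then walks forward while the nodes
  // still compare equal, yielding 3 without ever revisiting the bucket
  // array.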

#if __cplusplus > 201703L
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        if (!__n)
          return 0;

        // All equivalent values are next to each other; once we find a
        // non-equivalent value after an equivalent one we will not find
        // any further equivalent values.
        iterator __it(__n);
        size_type __result = 1;
        for (++__it;
             __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
             ++__it)
          ++__result;

        return __result;
      }
#endif

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
        return { __ite, __ite };

      auto __beg = __ite++;
      if (__unique_keys::value)
        return { __beg, __ite };

      // All equivalent values are next to each other; once we find a
      // non-equivalent value after an equivalent one we will not find
      // any further equivalent values.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
        ++__ite;

      return { __beg, __ite };
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
        return { __ite, __ite };

      auto __beg = __ite++;
      if (__unique_keys::value)
        return { __beg, __ite };

      // All equivalent values are next to each other; once we find a
      // non-equivalent value after an equivalent one we will not find
      // any further equivalent values.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
        ++__ite;

      return { __beg, __ite };
    }
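
  // Illustrative usage sketch (not part of the implementation), assuming
  // an std::unordered_multimap<int, char> __m:
  //
  //   auto [__first, __last] = __m.equal_range(7);
  //   for (auto __it = __first; __it != __last; ++__it)
  //     /* visit every element whose key is 7 */;
  //
  // The returned range is a single contiguous run of nodes in the table's
  // singly-linked list, so iterating it visits exactly the equivalent
  // elements and nothing else.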

#if __cplusplus > 201703L
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k)
      -> pair<iterator, iterator>
      {
        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        iterator __ite(__n);
        if (!__n)
          return { __ite, __ite };

        // All equivalent values are next to each other; once we find a
        // non-equivalent value after an equivalent one we will not find
        // any further equivalent values.
        auto __beg = __ite++;
        while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
          ++__ite;

        return { __beg, __ite };
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        const_iterator __ite(__n);
        if (!__n)
          return { __ite, __ite };

        // All equivalent values are next to each other; once we find a
        // non-equivalent value after an equivalent one we will not find
        // any further equivalent values.
        auto __beg = __ite++;
        while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
          ++__ite;

        return { __beg, __ite };
      }
#endif

  // Find the node before the one whose key compares equal to k in the bucket
  // bkt. Return nullptr if no node is found.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
                        __hash_code __code) const
    -> __node_base_ptr
    {
      __node_base_ptr __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
        return nullptr;

      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
           __p = __p->_M_next())
        {
          if (this->_M_equals(__k, __code, *__p))
            return __prev_p;

          if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
            break;
          __prev_p = __p;
        }

      return nullptr;
    }
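
  // Illustrative sketch (not part of the implementation): returning the
  // node *before* the match is what makes O(1) unlinking possible in a
  // singly-linked list. Assuming __prev is the returned pointer, an erase
  // can be written as
  //
  //   __node_ptr __victim = static_cast<__node_ptr>(__prev->_M_nxt);
  //   __prev->_M_nxt = __victim->_M_nxt;   // unlink
  //   /* then deallocate __victim */
  //
  // which is exactly the pattern used by the _M_erase overloads below.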

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
                             __hash_code __code) const
      -> __node_base_ptr
      {
        __node_base_ptr __prev_p = _M_buckets[__bkt];
        if (!__prev_p)
          return nullptr;

        for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
             __p = __p->_M_next())
          {
            if (this->_M_equals_tr(__k, __code, *__p))
              return __prev_p;

            if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
              break;
            __prev_p = __p;
          }

        return nullptr;
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
    {
      if (_M_buckets[__bkt])
        {
          // The bucket is not empty; insert the new node just after the
          // bucket's before-begin node.
          __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
          _M_buckets[__bkt]->_M_nxt = __node;
        }
      else
        {
          // The bucket is empty; the new node is inserted at the
          // beginning of the singly-linked list and the bucket will
          // contain the _M_before_begin pointer.
          __node->_M_nxt = _M_before_begin._M_nxt;
          _M_before_begin._M_nxt = __node;

          if (__node->_M_nxt)
            // We must update the former begin bucket, which is pointing
            // to _M_before_begin.
            _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

          _M_buckets[__bkt] = &_M_before_begin;
        }
    }
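
  // Illustrative sketch (not part of the implementation): every element
  // lives in one singly-linked list headed by _M_before_begin, and
  // _M_buckets[__bkt] stores the node *preceding* the bucket's first
  // element (or nullptr for an empty bucket). For a hypothetical table
  // whose elements were inserted into bucket 2 and then bucket 0, the
  // layout is
  //
  //   _M_before_begin -> n1 (bucket 0) -> n2 (bucket 2)
  //   _M_buckets[0] == &_M_before_begin
  //   _M_buckets[2] == n1
  //
  // which is why inserting a node at the front of the list above also
  // rewires the bucket that used to point at _M_before_begin.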

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
                           size_type __next_bkt)
    {
      if (!__next || __next_bkt != __bkt)
        {
          // The bucket is now empty.
          // First update the next bucket, if any.
          if (__next)
            _M_buckets[__next_bkt] = _M_buckets[__bkt];

          // Second, update the before-begin node if necessary.
          if (&_M_before_begin == _M_buckets[__bkt])
            _M_before_begin._M_nxt = __next;
          _M_buckets[__bkt] = nullptr;
        }
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_get_previous_node(size_type __bkt, __node_ptr __n)
    -> __node_base_ptr
    {
      __node_base_ptr __prev_n = _M_buckets[__bkt];
      while (__prev_n->_M_nxt != __n)
        __prev_n = __prev_n->_M_nxt;
      return __prev_n;
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(true_type /* __uks */, _Args&&... __args)
      -> pair<iterator, bool>
      {
        // First build the node to get access to the hash code.
        _Scoped_node __node { this, std::forward<_Args>(__args)... };
        const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
        __hash_code __code = this->_M_hash_code(__k);
        size_type __bkt = _M_bucket_index(__code);
        if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
          // There is already an equivalent node; no insertion is done.
          return std::make_pair(iterator(__p), false);

        // Insert the node.
        auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
        __node._M_node = nullptr;
        return { __pos, true };
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(const_iterator __hint, false_type /* __uks */,
                 _Args&&... __args)
      -> iterator
      {
        // First build the node to get its hash code.
        _Scoped_node __node { this, std::forward<_Args>(__args)... };
        const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());

        __hash_code __code = this->_M_hash_code(__k);
        auto __pos
          = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
        __node._M_node = nullptr;
        return __pos;
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_unique_node(size_type __bkt, __hash_code __code,
                          __node_ptr __node, size_type __n_elt)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
                                          __n_elt);

      if (__do_rehash.first)
        {
          _M_rehash(__do_rehash.second, __saved_state);
          __bkt = _M_bucket_index(__code);
        }

      this->_M_store_code(*__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
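
  // Illustrative sketch (not part of the implementation) of the ordering
  // that _M_emplace and _M_insert_unique_node rely on for exception
  // safety:
  //
  //   1. allocate and construct the node  (may throw: _Scoped_node cleans up)
  //   2. compute the hash code            (may throw: node still owned by
  //                                        _Scoped_node)
  //   3. rehash if the policy asks for it (may throw: policy state restored,
  //                                        see _M_rehash)
  //   4. link the node and bump the count (cannot throw)
  //
  // Only after step 4 succeeds is _Scoped_node told to release ownership
  // by nulling its _M_node member.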

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_ptr __hint,
                         __hash_code __code, __node_ptr __node)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
        _M_rehash(__do_rehash.second, __saved_state);

      this->_M_store_code(*__node, __code);
      const key_type& __k = _ExtractKey{}(__node->_M_v());
      size_type __bkt = _M_bucket_index(__code);

      // Find the node before an equivalent one, or use the hint if it
      // exists and is equivalent.
      __node_base_ptr __prev
        = __builtin_expect(__hint != nullptr, false)
          && this->_M_equals(__k, __code, *__hint)
          ? __hint
          : _M_find_before_node(__bkt, __k, __code);

      if (__prev)
        {
          // Insert after the node before the equivalent one.
          __node->_M_nxt = __prev->_M_nxt;
          __prev->_M_nxt = __node;
          if (__builtin_expect(__prev == __hint, false))
            // The hint might be the bucket's last node; in that case we
            // need to update the next bucket.
            if (__node->_M_nxt
                && !this->_M_equals(__k, __code, *__node->_M_next()))
              {
                size_type __next_bkt = _M_bucket_index(*__node->_M_next());
                if (__next_bkt != __bkt)
                  _M_buckets[__next_bkt] = __node;
              }
        }
      else
        // The inserted node has no equivalent in the hashtable. We must
        // insert the new node at the beginning of the bucket to preserve
        // equivalent elements' relative positions.
        _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
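
  // Illustrative usage sketch (not part of the implementation): the hint
  // only helps when it designates an element equivalent to the one being
  // inserted, e.g. assuming an std::unordered_multimap<int, int> __m:
  //
  //   auto __hint = __m.find(5);
  //   __m.emplace_hint(__hint, 5, 42);  // links right after __hint,
  //                                     // skipping _M_find_before_node
  //
  // With a non-equivalent hint the code above simply falls back to the
  // ordinary bucket scan, so a bad hint is never incorrect, only useless.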

  // Insert v if no element with its key is already present.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen,
                true_type /* __uks */)
      -> pair<iterator, bool>
      {
        const key_type& __k = _ExtractKey{}(__v);
        __hash_code __code = this->_M_hash_code(__k);
        size_type __bkt = _M_bucket_index(__code);

        if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
          return { iterator(__node), false };

        _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
        auto __pos
          = _M_insert_unique_node(__bkt, __code, __node._M_node);
        __node._M_node = nullptr;
        return { __pos, true };
      }

  // Insert v unconditionally.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(const_iterator __hint, _Arg&& __v,
                const _NodeGenerator& __node_gen,
                false_type /* __uks */)
      -> iterator
      {
        // First compute the hash code so that we don't do anything if it
        // throws.
        __hash_code __code = this->_M_hash_code(_ExtractKey{}(__v));

        // Second, allocate the new node so that we don't rehash if that
        // throws.
        _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
        auto __pos
          = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
        __node._M_node = nullptr;
        return __pos;
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __it)
    -> iterator
    {
      __node_ptr __n = __it._M_cur;
      std::size_t __bkt = _M_bucket_index(*__n);

      // Look for the previous node so we can unlink the erased one; this
      // is why buckets store the node before their first element, which
      // makes this search fast.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      return _M_erase(__bkt, __prev_n, __n);
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      if (__prev_n == _M_buckets[__bkt])
        _M_remove_bucket_begin(__bkt, __n->_M_next(),
                               __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
        {
          size_type __next_bkt = _M_bucket_index(*__n->_M_next());
          if (__next_bkt != __bkt)
            _M_buckets[__next_bkt] = __prev_n;
        }

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
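
  // Illustrative sketch (not part of the implementation): erasing a node
  // has to keep two invariants intact besides unlinking it,
  //
  //   - if the node was the first of its bucket, the bucket (and possibly
  //     _M_before_begin) must be redirected or cleared, which is what
  //     _M_remove_bucket_begin does;
  //   - if the node was the last of its bucket and another bucket follows,
  //     that bucket's "before begin" pointer must now designate __prev_n
  //     instead of the erased node.
  //
  // Only once the links are consistent is the node deallocated and the
  // element count decremented.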

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(true_type /* __uks */, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
        return 0;

      // We found a matching node; erase it.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      _M_erase(__bkt, __prev_n, __n);
      return 1;
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(false_type /* __uks */, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
        return 0;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to deallocate
      // them so that the key stays valid during the first loop. It might be
      // invalidated indirectly when destroying nodes.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
        __n_last = __n_last->_M_next();

      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate the nodes.
      size_type __result = 0;
      do
        {
          __node_ptr __p = __n->_M_next();
          this->_M_deallocate_node(__n);
          __n = __p;
          ++__result;
        }
      while (__n != __n_last);

      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
        _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
        _M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }
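
  // Illustrative sketch (not part of the implementation) of why the
  // multi-key erase above uses two loops. With a single loop, assuming a
  // hypothetical std::unordered_multimap<std::string, int> __m,
  //
  //   __m.erase(__m.begin()->first);
  //
  // would pass a reference to a key stored inside the first node; if that
  // node were deallocated while the loop still compared against the key,
  // the reference would dangle. Finding the whole run of equivalent nodes
  // first, then deallocating, keeps the key valid for every comparison
  // (see LWG 526 above).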

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
        return iterator(__n);

      std::size_t __bkt = _M_bucket_index(*__n);

      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
        {
          do
            {
              __node_ptr __tmp = __n;
              __n = __n->_M_next();
              this->_M_deallocate_node(__tmp);
              --_M_element_count;
              if (!__n)
                break;
              __n_bkt = _M_bucket_index(*__n);
            }
          while (__n != __last_n && __n_bkt == __bkt);
          if (__is_bucket_begin)
            _M_remove_bucket_begin(__bkt, __n, __n_bkt);
          if (__n == __last_n)
            break;
          __is_bucket_begin = true;
          __bkt = __n_bkt;
        }

      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
        _M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    clear() noexcept
    {
      this->_M_deallocate_nodes(_M_begin());
      __builtin_memset(_M_buckets, 0,
                       _M_bucket_count * sizeof(__node_base_ptr));
      _M_element_count = 0;
      _M_before_begin._M_nxt = nullptr;
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    rehash(size_type __bkt_count)
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      __bkt_count
        = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
                   __bkt_count);
      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);

      if (__bkt_count != _M_bucket_count)
        _M_rehash(__bkt_count, __saved_state);
      else
        // No rehash; restore the previous state to keep it consistent
        // with the container state.
        _M_rehash_policy._M_reset(__saved_state);
    }
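
  // Illustrative sketch (not part of the implementation), assuming the
  // default _Prime_rehash_policy with max_load_factor() == 1.0 and a
  // table __h currently holding 100 elements:
  //
  //   __h.rehash(10);
  //
  // first clamps the request to at least _M_bkt_for_elements(101), i.e.
  // enough buckets for the current elements plus one, and then rounds the
  // result up to the policy's next admissible bucket count (a prime for
  // the default policy). The call can therefore enlarge the table, but it
  // never shrinks it below what the load factor requires.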

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
    {
      __try
        {
          _M_rehash_aux(__bkt_count, __unique_keys{});
        }
      __catch(...)
        {
          // A failure here means that bucket allocation failed. We only
          // have to restore the hash policy's previous state.
          _M_rehash_policy._M_reset(__state);
          __throw_exception_again;
        }
    }

  // Rehash when there are no equivalent elements.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      while (__p)
        {
          __node_ptr __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
          if (!__new_buckets[__bkt])
            {
              __p->_M_nxt = _M_before_begin._M_nxt;
              _M_before_begin._M_nxt = __p;
              __new_buckets[__bkt] = &_M_before_begin;
              if (__p->_M_nxt)
                __new_buckets[__bbegin_bkt] = __p;
              __bbegin_bkt = __bkt;
            }
          else
            {
              __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
              __new_buckets[__bkt]->_M_nxt = __p;
            }

          __p = __next;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
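
  // Illustrative sketch (not part of the implementation): rehashing never
  // copies or reallocates elements, it only relinks the existing nodes
  // into a freshly allocated bucket array. In outline:
  //
  //   auto* __new_buckets = allocate(__bkt_count);      // may throw
  //   for each node __p in the old list                 // nothrow
  //     recompute __p's bucket from its (possibly cached) hash code
  //     and splice __p to the front of that bucket;
  //   free the old bucket array and adopt the new one;
  //
  // This is why pointers and references to elements stay valid across a
  // rehash, and why the only throwing step is the bucket allocation that
  // _M_rehash guards.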

  // Rehash when there can be equivalent elements, preserving their
  // relative order.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      std::size_t __prev_bkt = 0;
      __node_ptr __prev_p = nullptr;
      bool __check_bucket = false;

      while (__p)
        {
          __node_ptr __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

          if (__prev_p && __prev_bkt == __bkt)
            {
              // The previous insertion was into this bucket; we insert
              // after the previously inserted node to preserve the
              // relative order of equivalent elements.
              __p->_M_nxt = __prev_p->_M_nxt;
              __prev_p->_M_nxt = __p;

              // Inserting after a node in a bucket requires checking that
              // we haven't changed the bucket's last node, in which case
              // the next bucket, which contains its before-begin node,
              // must be updated. We schedule the check as soon as we move
              // out of the sequence of equivalent nodes to limit the
              // number of checks.
              __check_bucket = true;
            }
          else
            {
              if (__check_bucket)
                {
                  // Check whether we need to update the next bucket
                  // because of insertions into the __prev_bkt bucket.
                  if (__prev_p->_M_nxt)
                    {
                      std::size_t __next_bkt
                        = __hash_code_base::_M_bucket_index(
                            *__prev_p->_M_next(), __bkt_count);
                      if (__next_bkt != __prev_bkt)
                        __new_buckets[__next_bkt] = __prev_p;
                    }
                  __check_bucket = false;
                }

              if (!__new_buckets[__bkt])
                {
                  __p->_M_nxt = _M_before_begin._M_nxt;
                  _M_before_begin._M_nxt = __p;
                  __new_buckets[__bkt] = &_M_before_begin;
                  if (__p->_M_nxt)
                    __new_buckets[__bbegin_bkt] = __p;
                  __bbegin_bkt = __bkt;
                }
              else
                {
                  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
                  __new_buckets[__bkt]->_M_nxt = __p;
                }
            }
          __prev_p = __p;
          __prev_bkt = __bkt;
          __p = __next;
        }

      if (__check_bucket && __prev_p->_M_nxt)
        {
          std::size_t __next_bkt
            = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
                                                __bkt_count);
          if (__next_bkt != __prev_bkt)
            __new_buckets[__next_bkt] = __prev_p;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }

#if __cplusplus > 201402L
  template<typename, typename, typename> class _Hash_merge_helper { };
#endif // C++17

#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
#endif

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _HASHTABLE_H