Index: libstdc++-v3/include/Makefile.am
===================================================================
--- libstdc++-v3/include/Makefile.am (revision 227597)
+++ libstdc++-v3/include/Makefile.am (working copy)
@@ -96,6 +96,7 @@
${bits_srcdir}/codecvt.h \
${bits_srcdir}/concept_check.h \
${bits_srcdir}/cpp_type_traits.h \
+ ${bits_srcdir}/cuckoo.h \
${bits_srcdir}/deque.tcc \
${bits_srcdir}/enable_special_members.h \
${bits_srcdir}/forward_list.h \
Index: libstdc++-v3/include/Makefile.in
===================================================================
--- libstdc++-v3/include/Makefile.in (revision 227597)
+++ libstdc++-v3/include/Makefile.in (working copy)
@@ -386,6 +386,7 @@
${bits_srcdir}/codecvt.h \
${bits_srcdir}/concept_check.h \
${bits_srcdir}/cpp_type_traits.h \
+ ${bits_srcdir}/cuckoo.h \
${bits_srcdir}/deque.tcc \
${bits_srcdir}/enable_special_members.h \
${bits_srcdir}/forward_list.h \
Index: libstdc++-v3/include/bits/cuckoo.h
===================================================================
--- libstdc++-v3/include/bits/cuckoo.h (revision 0)
+++ libstdc++-v3/include/bits/cuckoo.h (working copy)
@@ -0,0 +1,3103 @@
+// hybrid wild cuckoo hashtable implementation -*- C++ -*-
+
+// Copyright (C) 2010-2015 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/cuckoo.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{cuckoo}
+ * This file implements Hybrid Wild Cuckoo and Wild Cuckoo. The latter is used
+ * as a building block for Hybrid Wild Cuckoo when all the bells and whistles
+ * are required (as they are for a C++ standards-compliant unordered_{set,map}).
+ * It may also be interesting on its own.
+ *
+ * For simplicity we use one table (as is also done in some variants of Cuckoo).
+ * We also force the table size to be a power of 2 in this implementation,
+ * though there is no theoretical reason to do so. The number of elements in
+ * the table is _M_num_elements in the code, but called "n" here; the number of
+ * buckets is _M_num_buckets in the code, but called "b" here.
+ *
+ * As usual in such discussions, the big-O bounds stated below
+ * assume that hashing an element is O(1). We also ignore the bitset
+ * size and z, parameters typically set to small integers, when
+ * discussing big-O bounds. E.g., we never write O(z); we write O(1)
+ * for that.
+ *
+ * In Wild Cuckoo, the algorithm for inserting an item is similar to Cuckoo
+ * except that the set of possible "nests" for an item can grow dynamically.
+ * In particular, the algorithm for find-or-insert-if-not-present (often called
+ * "insert" among C++ programmers) is as follows:
+ * 1. Compute g1 = H1(key) and g2 = H2(key), where H1 and H2 are hash functions.
+ * 2. Let h1 = g1 % b and h2 = g2 % b
+ * 3. Look for the key in all its possible nests. Typically these are just
+ * h1, h1^1, h1^2, ..., h1^z, h2, h2^1, h2^2, ..., and h2^z. (Say, for z = 4.)
+ *    However, we also keep a map of <g1,g2> pairs to information on
+ *    additional possible nests, which can be anywhere in the table.  Pairs
+ *    with additional possible nests are called "extended."  More details on
+ *    this are below.  A key point is that, unlike in all previous
+ * multiple-choice-hashing schemes, a key with any hash codes can appear
+ * anywhere in the table... Hence the "Wild" in Wild Cuckoo.
+ * 4. If the key is not found in any of its nests then it must be inserted,
+ *    into an empty nest if possible. If no nest is free then we have two
+ *    choices:
+ *    o If the size of all extensions to <g1,g2> is below the "extension
+ *      cap," a constant, and either <g1,g2> is extended or a coin flip is
+ *      heads, then (further) extend the set of possible nests for <g1,g2>.
+ * o Otherwise, bump an element in one of the relevant nests, put the new
+ * item in its place, and then re-insert the item that was bumped.
+ *
+ * When inserting an item known to be not present, we follow a substantially
+ * similar procedure. That's a bit of a departure from Cuckoo, which will
+ * prefer to bump immediately rather than compute H2(key). Whether this matters
+ * is an open question.
+ *
+ * In the variation of Wild Cuckoo implemented here, we use the concept of a
+ * sequence of buckets defined by an ordered pair of indices, i and j:
+ * The sequence is given by the values taken by y in this pseudocode (assume
+ * i != j and all math is mod b):
+ * y = i
+ * repeat for k in {0, ..., b-2}:
+ * if (j - i + k == 0) break; else y += j - i + k;
+ * y = i - (j - i - 1)
+ * repeat for l in {2, ..., b-1}:
+ * y -= j - i - l;
+ * We never use more than b values from such a sequence, and the first b values
+ * are unique if i != j and b is a power of 2. (Reasoning: it builds off the
+ * well-known fact that if b is a power of two then the first b elements of
+ * 0, 1, 3, 6, 10, ... are unique (mod b).)
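+ *
+ * A worked example (illustrative): with b = 8, i = 0, and j = 3, the
+ * pseudocode above yields 0, 3, 7, 4, 2, 1, 6, 5, i.e., each of the
+ * eight buckets exactly once.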
+ *
+ * The first "extension" for a particular <g1,g2> pair is any one bucket, the
+ * second is any two buckets, the third is four buckets specified as the first
+ * four elements of a sequence defined by a pair of buckets, the fourth is the
+ * first eight buckets specified as the first eight elements of a sequence
+ * defined by a pair of buckets, etc.
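+ *
+ * For example (illustrative, with z = 4): after its third extension, a
+ * <g1,g2> pair has its 2(z+1) = 10 base nests plus 1 + 2 + 4 = 7 extension
+ * nests, 17 possible nests in all.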
+ *
+ * Conceptually there's a hash map for extensions, with keys being <g1,g2>
+ * pairs and values being the specification of a permutation of bucket indices
+ * and how much of that permutation to use. Unlike everything else, these
+ * key-value pairs are stored in "chaining" style, in the same table as our
+ * pointers to Values. A bucket's pointer can be NULL or point to one of these:
+ * 1. a single Value (and nothing else---these are not linked lists)
+ * 2. a linked list of Extension (chaining style, for our bookkeeping)
+ * A per-bucket "hint," one extra bit per bucket, distinguishes these cases.
+ * (It may be possible to "steal" a bit from pointers in buckets, thus
+ * eliminating the need to store hints separately.)
+ *
+ * Hybrid Wild Cuckoo is similar in spirit to Wild Cuckoo except that it starts
+ * with a more traditional and faster "hash the key once and hope for the best"
+ * strategy. We use H0 as the hash function. Only buckets that are "overfull"
+ * get the Wild Cuckoo treatment, using hash functions H1 and H2. In Hybrid
+ * Wild Cuckoo, hints are larger, because they have space to store a small
+ * bitset that in the best case records where to find all the keys for this
+ * bucket. If the small bitset is not sufficient to encode that information
+ * then the bucket is designated as "overfull" and Wild Cuckoo is enabled for
+ * that bucket.
+ *
+ * A few more details:
+ * o insert invalidates no iterators
+ * o erase invalidates no iterators except for those pointing to the erased key
+ * o Iterator "re-validation" can take O(1) time. Iterator "re-validation"
+ * occurs when the bucket number stored is stale, but we need an
+ * accurate bucket number, e.g., because we are iterating, or the
+ * iterator is passed to erase(). See Appendix for how it could be done.
+ * o erase(iterator) takes O(1 + time to re-validate the iterator) time. So, if
+ * iterator re-validation is O(1) then erase(iterator) is also. (But,
+ * erase(begin()) may be slower than that if we start to cache what begin()
+ * would return?)
+ *
+ * As before, conceptually a "bucket" is a list of items that all hash to the
+ * same value mod b. A "slot" is an entry in the hashtable. Conceptually, it
+ * is a pair of values, a hint and a pointer. These can be stored in two
+ * separate arrays, or in one array of uintptr_t. The latter is possible if we
+ * can "steal" bits from pointers because we know that certain values for
+ * pointers are unsupported.
+ *
+ * If hash collisions are rare then a lot of times the bucket for an
+ * item, x, will contain only x, and the slot it will use is
+ * ht_->_M_slots[H0(x) % b]. The hint in slot ht_->_M_slots[i] always contains
+ * information about the bucket of items that hash to i (mod b).
+ *
+ * In Hybrid Wild Cuckoo, a bucket's pointer points to one of two things:
+ * 1. a Value (and nothing else---these are not linked lists)
+ * 2. a linked list of Extension (chaining style, for our bookkeeping)
+ *
+ * Commit-or-Rollback Semantics
+ *
+ * We implement commit-or-rollback semantics for certain operations, as required
+ * by the C++ standard.
+ *
+ * A cuckoo hash insertion can rearrange items, though in practice it usually
+ * doesn't. We have "rollback state" to capture what we're changing. In the
+ * code, _M_get_rollback_state() is initially null, and we call this the "easy" state.
+ * For example, perhaps all we need to do is to copy an object and update
+ * _M_slots and _M_num_elements. In that case the copy requires allocation, but
+ * we've arranged the code to have nothing to rollback if that throws. The
+ * "hard" state, on the other hand, has non-null _M_get_rollback_state() that we
+ * update as we go, because there may be more operations to come, and if those
+ * throw we have to roll back.
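+ *
+ * For instance (an illustrative sketch of the hard case): bumping an item
+ * rewrites two slot words; each old word is recorded in the rollback state
+ * before being overwritten, so if a later copy constructor throws, the
+ * recorded words can be written back in reverse order, leaving the table as
+ * it was before the insert began.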
+ *
+ * Appendix 1: Things that will be added
+ *
+ * o Proper support for allocators
+ *
+ * o O(1) time for begin. The initial implementation will likely be to keep
+ * track of the lowest-numbered bucket that contains a Value.
+ *
+ * Appendix: Things that might be added
+ *
+ * o Iteration through all n elements takes O(n) time (not O(b)):
+ * The easy way to deal with this is, I think, to have extra information
+ * that is maintained only when N/B < (some constant). So when N/B is really
+ * low we have this extra info, and when it is not, we know O(B) = O(N) so
+ * straightforward techniques are fine (e.g., skipping over as many empty
+ * buckets as necessary when advancing an iterator). We keep a doubly-linked
+ * list embedded in an array of length B when N/B is tiny. There's not much
+ * reason to complicate this. We simply encode the prev/next data in the
+ * extra array. The type of "prev" and "next" is _Bucket_number. When the
+ * number of elements grows beyond some threshold we can discard the extra
+ * array. The "discard" operation takes O(N) = O(B) time since N/B is very
+ * near some constant at the time it occurs. Similarly, if deletions cause
+ * N/B to get tiny again then we can revive the extra array and reconstruct
+ * the doubly-linked list at a cost of O(N) = O(B).
+ *
+ * o O(1) iterator re-validation: given an iterator, which is little more than a
+ * pointer to a Value, how do we find the bucket pointing to that Value? If
+ * the extension cap is finite then the number of possible nests for any
+ * key is bounded by a constant, so we can use find(). Unfortunately, in
+ * practice, there are contexts where using an infinite (or very large)
+ * extension cap makes sense. So, for only those keys with a large number
+ * of possible nests, we could store a separate hash map that allows relevant
+ * pointers to be mapped back to buckets in constant time. However, it
+ *    seems like more trouble than it's worth, so this isn't implemented.
+ *
+ * o Local iterators that approach the spirit of the C++ standard rather than just
+ * meeting the minimum requirements: this is a low priority because local
+ * iterators are an obscure, seldom-used feature. That said, it is certainly
+ * possible to do some work in this area.
+ *
+ * o unordered_multiset and unordered_multimap: no decision has been made on how
+ * to implement these.
+ */
+
+#ifndef _CUCKOO_H
+#define _CUCKOO_H
+
+#include <bits/c++config.h>
+#include <bits/exception_defines.h>
+#include <algorithm>
+#include <bitset>
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#if __cpp_exceptions
+#include <stdexcept>
+#endif
+#include <limits>
+#include <new>
+#include <random>
+#include <utility>
+#include <vector>
+
+
+namespace cuckoo_internal
+{
+ typedef size_t size_type;
+ typedef size_t _Bucket_number;
+
+ // _Cuckoos: either empty or a pair of hash function objects, depending on
+ // whether select_from_hash_function_family() is present in _V. _HasFamily
+ // is a helper whose purpose is to provide an _S_cuckoo.
+  template<typename _V>
+ struct _HasFamily
+ {
+ // SFINAE will make the first overload available if _V has a suitable
+ // select_from_hash_function_family() method.
+    template<typename _W>
+ static char
+ _S_func(_V (*)
+ [sizeof(((_W*)0)
+ ->select_from_hash_function_family((std::knuth_b*)0))]);
+
+    template<typename _W>
+ static int
+ _S_func(...);
+
+ enum
+ {
+ _S_cuckoo = sizeof(_S_func<_V>(0)) == sizeof(char)
+ };
+ };
+
+  template<typename _V, bool = _HasFamily<_V>::_S_cuckoo> struct _Cuckoos;
+
+  template<typename _V>
+ struct _Cuckoos<_V, true> : public _HasFamily<_V>
+ {
+ // TODO: invoke this from the appropriate places.
+    template<typename _Hasher, typename R>
+ void
+ _M_select_hash_functions(_Hasher* __h, R* __rng)
+ {
+ _M_hashers[0] = __h->select_from_hash_function_family(__rng);
+ _M_hashers[1] = __h->select_from_hash_function_family(__rng);
+ }
+
+    template<typename _Key>
+ size_type
+ _M_hash1(const _Key& __v, size_type) const
+ { return _M_hashers[0](__v); }
+
+    template<typename _Key>
+ size_type
+ _M_hash2(const _Key& __v, size_type) const
+ { return _M_hashers[1](__v); }
+
+ decltype(((_V*)0)->select_from_hash_function_family((std::knuth_b*)0)) _M_hashers[2];
+ };
+
+  template<typename _V>
+ struct _Cuckoos<_V, false> : public _HasFamily<_V>
+ {
+ // When the client hasn't provided us with select_from_hash_function_family,
+ // _M_hash1 and _M_hash2 fall back to rehashing the output of the one hash
+ // function that the client did provide.
+    template<typename __Hasher, typename _RNG>
+ void
+ _M_select_hash_functions(__Hasher*, _RNG* r)
+ {
+ _M_salt1 = _S_make_salt(r);
+ _M_salt2 = _S_make_salt(r);
+ while (_M_salt1 == _M_salt2)
+ {
+ _M_salt1 += (*r)() * 2;
+ }
+ assert(_M_salt1 % 2 == 1 && _M_salt2 % 2 == 1);
+ }
+
+    template<typename _RNG>
+ static size_type
+ _S_make_salt(_RNG* __r)
+ {
+ size_type __best_salt = 0;
+ size_type __best_score = 0;
+ int __tries = 2 + ((*__r)() & 0x7);
+ for (int __i = 0; __i < __tries; __i++)
+ {
+ size_type __salt = (*__r)() | 1;
+ size_type __q = _S_salt_score(__salt);
+ if (__q > __best_score) {
+ __best_score = __q;
+ __best_salt = __salt;
+ }
+ }
+ return __best_salt;
+ }
+
+ // Higher is better.
+ static size_type
+ _S_salt_score(size_type __salt)
+ {
+      const auto __bits = std::numeric_limits<size_type>::digits;
+      return (std::numeric_limits<size_type>::max()
+ - abs(popcnt(__salt) - __bits/2));
+ }
+
+    template<typename __I>
+ static int
+ popcnt(__I __n)
+ {
+ const int __shift = sizeof(__n) <= sizeof(unsigned int)
+ ? 0
+        : std::numeric_limits<unsigned int>::digits;
+ if (sizeof(__n) <= sizeof(unsigned int))
+ {
+          return std::bitset<std::numeric_limits<unsigned int>::digits>(__n).count();
+ }
+ else
+ {
+ int __count = 0;
+ while (__n != 0)
+ {
+ __count +=
+            std::bitset<std::numeric_limits<unsigned int>::digits>(__n).count();
+ __n >>= __shift;
+ }
+ return __count;
+ }
+ }
+
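+    // A note on _S_rehash (below): it derives a secondary hash by mixing
+    // __x with an odd salt __y.  For any fixed odd __y each step (adding a
+    // constant, multiplying by an odd number mod 2^W, xor-ing with a right
+    // shift) is a bijection on W-bit words, so distinct inputs remain
+    // distinct; the salt merely decorrelates the derived hash functions.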
+ static size_type
+ _S_rehash(size_type __x, size_type __y)
+ {
+      const auto __bits = std::numeric_limits<size_type>::digits;
+ const int __shift = __bits * 30 / 32 - 13;
+ __x += __y >> 10;
+ __x *= __y;
+ __x ^= __x >> __shift;
+ __x *= __y;
+ __x ^= __x >> (__shift ^ 1);
+ return __x;
+ }
+
+    template<typename __Key>
+ size_type
+ _M_hash1(const __Key&, size_type g0) const
+ { return _S_rehash(g0, _M_salt1); }
+
+    template<typename __Key>
+ size_type
+ _M_hash2(const __Key&, size_type g0) const
+ { return _S_rehash(g0, _M_salt2); }
+
+ size_type _M_salt1;
+ size_type _M_salt2;
+ };
+
+ // _Settings contains parameters for growing and shrinking the table.
+ // It also packages a hasher that may have zero size.
+  template<typename _Key, typename _Hasher>
+ class _Cuckoo_settings : public _Hasher, public _Cuckoos<_Hasher>
+ {
+ public:
+ typedef _Key key_type;
+ typedef _Hasher hasher;
+
+ _Cuckoo_settings(const hasher& hf,
+ const float __enlarge,
+ const float __shrink)
+ : hasher(hf),
+ _M_enlarge_threshold(0),
+ _M_shrink_threshold(0),
+ _M_num_hashtable_copies(0)
+ {
+ set_enlarge_factor(__enlarge);
+ set_shrink_factor(__shrink);
+ }
+
+ size_type
+ _M_hash0(const key_type& __v) const
+ { return hasher::operator()(__v); }
+
+ void
+ set_enlarge_factor(float __f)
+ { _M_enlarge_factor = __f; }
+
+ void
+ set_shrink_factor(float __f)
+ { _M_shrink_factor = __f; }
+
+ void
+ set_enlarge_threshold(size_type __t)
+ { _M_enlarge_threshold = __t; }
+
+ void
+ set_shrink_threshold(size_type __t)
+ { _M_shrink_threshold = __t < _S_min_buckets ? 0 : __t; }
+
+ size_type
+ enlarge_size(size_type __x) const
+    { return static_cast<size_type>(__x * _M_enlarge_factor); }
+
+ size_type
+ shrink_size(size_type __x) const
+    { return static_cast<size_type>(__x * _M_shrink_factor); }
+
+ size_type
+ _M_num_copies() const
+    { return static_cast<size_type>(_M_num_hashtable_copies); }
+
+ void
+ _M_increment_num_copies()
+ { ++_M_num_hashtable_copies; }
+
+ // Reset the enlarge and shrink thresholds
+ void
+ _M_reset_thresholds(size_type __num_buckets)
+ {
+ set_enlarge_threshold(enlarge_size(__num_buckets));
+ set_shrink_threshold(shrink_size(__num_buckets));
+ }
+
+    // Caller is responsible for calling _M_reset_thresholds right after
+    // set_resizing_parameters.
+ void
+ set_resizing_parameters(float shrink, float grow)
+ {
+ assert(shrink >= 0.0);
+ grow = std::min(grow, 1.0f);
+ // Why 2.2 here? Would 2.0 or some other value be better?
+ // 2.2 should avoid thrashing the size, but is this well considered?
+ shrink = std::min(shrink, grow / 2.2f);
+ set_shrink_factor(shrink);
+ set_enlarge_factor(grow);
+ }
+
+ // Returns the smallest size a hashtable can be without being too crowded.
+ // Except, it will not return a value less than the second arg. Run time is
+ // O(lg(num_elts)): that's subpar theoretically, but reasonable in
+ // context, as rebuilding the table will take at least O(num_elts) time.
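+    // Worked example (illustrative): with _M_enlarge_factor = 0.6 and
+    // _S_min_buckets = 32, min_buckets(100, 0) doubles 32 -> 64 -> 128 -> 256
+    // and returns 256, the first power of two whose enlarge threshold
+    // (256 * 0.6 = 153) exceeds 100.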
+ size_type
+ min_buckets(size_type __num_elts, size_type __min_buckets_wanted)
+ {
+ size_type __result = _S_min_buckets;
+ while (__result < __min_buckets_wanted
+ || (__num_elts
+                  >= static_cast<size_type>(__result * _M_enlarge_factor)))
+ {
+ // Try to avoid overflowing size_type, since __result can exceed
+ // max_size() here.
+          if (static_cast<size_type>(__result * 2) < __result)
+ {
+#if __cpp_exceptions
+ throw std::length_error("resize overflow");
+#else
+ assert(0 && "resize overflow");
+#endif
+ }
+ __result *= 2;
+ }
+ return __result;
+ }
+
+ size_type _M_enlarge_threshold; // table.size() * enlarge_factor
+ size_type _M_shrink_threshold; // table.size() * shrink_factor
+ float _M_enlarge_factor; // how full before resize
+ float _M_shrink_factor; // how empty before resize
+
+ /* A counter incremented when we copy/move the table. */
+ unsigned int _M_num_hashtable_copies;
+ };
+
+ // Exception throwing utility.
+#ifdef ATTRIBUTE_NORETURN
+  template<typename _V>
+ inline void
+ ThrowException() ATTRIBUTE_NORETURN;
+#endif
+
+  template<typename _V>
+ inline void
+ ThrowException()
+ {
+#if __cpp_exceptions
+ throw _V();
+#endif
+ for (;;) assert(0 && "internal error: unreachable");
+ }
+
+ class _Permutation_iterator
+ {
+ public:
+ typedef cuckoo_internal::_Bucket_number _Bucket_number;
+ typedef cuckoo_internal::size_type size_type;
+ _Permutation_iterator(_Bucket_number __i, _Bucket_number __j,
+ size_type __b /* num_buckets */, size_type __len)
+ : _M_i(__i),
+ _M_j(__j),
+ _M_y(__i),
+ _M_delta(__j - __i),
+ _M_b(__b),
+ _M_len(__len) {
+ assert(0 <= __i && __i < __b && 0 <= __j && __j < __b && __i != __j);
+ assert((__b & (__b - 1)) == 0); // b must be a power of 2.
+ assert(__len <= __b);
+ }
+
+ bool
+ _M_done() const
+ { return _M_len == 0; }
+
+ _Bucket_number
+ _M_next()
+ {
+ assert(!_M_done());
+ --_M_len;
+ // Invariant: _M_y is the value we want. Store it and compute the next value.
+ _Bucket_number __result = _M_y;
+ if (_M_delta == 0)
+ {
+ _M_delta = _M_mod_b(-(_M_j - _M_i - 2));
+ _M_y = _M_mod_b(_M_i - (_M_j - _M_i - 1));
+ }
+ else
+ {
+ _M_y += _M_delta;
+ ++_M_delta;
+ }
+ _M_delta = _M_mod_b(_M_delta);
+ _M_y = _M_mod_b(_M_y);
+ return __result;
+ }
+
+ private:
+ _Bucket_number
+ _M_mod_b(_Bucket_number __x)
+ { return __x & (_M_b - 1); }
+
+ const _Bucket_number _M_i;
+ const _Bucket_number _M_j;
+ _Bucket_number _M_y;
+ _Bucket_number _M_delta;
+ const size_type _M_b;
+ size_type _M_len;
+ };
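+
+  // Example use (an illustrative sketch): to visit the first four buckets
+  // of the sequence defined by the pair (__i, __j) in a table of __b
+  // buckets:
+  //
+  //   _Permutation_iterator __it(__i, __j, __b, 4);
+  //   while (!__it._M_done())
+  //     __visit(__it._M_next());  // __visit is a hypothetical callback.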
+
+ // Helpers for compile-time computation.
+
+  template<size_type _S_x, size_type _S_y>
+ struct _Max
+ { enum { _S_value = _S_x > _S_y ? _S_x : _S_y }; };
+
+ // ceil(lg(x)); requires x >= 1.
+  template<size_type _S_x>
+ struct _Ceil_log2
+ {
+ // The recursive case divides by 16, so that template depth
+ // is kept low. To avoid overflow, we never add anything to x.
+ enum
+ {
+ _S_is_power_of_two = (_S_x & (_S_x - 1)) == 0,
+ _S_value = _S_x == 2 ? 1
+ : _S_x <= 4 ? 2
+ : _S_x <= 8 ? 3
+ : _S_x <= 16 ? 4
+ : _S_x <= 32 ? 5
+ : (_Ceil_log2<(_S_x / 16) | 1>::_S_value
+ + (_S_is_power_of_two ? 3 : 4)) };
+ };
+
+ template<> struct _Ceil_log2<1> {
+ enum { _S_value = 0 };
+ };
+
+  template<size_type _S_x>
+ struct SmallestPowerOfTwoNotLessThan {
+    enum { _S_value = static_cast<size_type>(1) << _Ceil_log2<_S_x>::_S_value };
+ };
+
+ // Utilities related to random number generators (RNGs).
+
+ // We need random numbers for choosing hash functions from a family,
+ // deciding whether to bump or extend, deciding what to bump, and
+ // _M_random_odd_number. But, none of those matter for very small
+ // tables, so we initialize the RNG lazily, when it's first needed.
+ typedef std::minstd_rand _RNG; // Other choices here would likely be fine too.
+
+ class _RNGState
+ {
+ public:
+ typedef cuckoo_internal::size_type size_type;
+
+ explicit _RNGState(const std::seed_seq& seq)
+ { _M_reset(seq); }
+
+ void
+ _M_reset(const std::seed_seq& __seq)
+ {
+      std::vector<uint32_t> __v(__seq.size());
+ __seq.param(__v.begin());
+ std::seed_seq __tmp(__v.begin(), __v.end());
+ _M_rng.seed(__tmp);
+ _M_random_odd_number =
+        _M_rand_at_most(std::numeric_limits<size_type>::max()) | 1;
+ }
+
+ uint64_t
+ _M_rand_uint64()
+ {
+ constexpr uint64_t __rngmax = _RNG::max();
+ constexpr uint64_t __rngmin = _RNG::min();
+ constexpr uint64_t __rngrange = __rngmax - __rngmin;
+ static_assert(__rngrange + _RNG::min() == _RNG::max(), "overflow");
+      uint64_t __result = static_cast<uint64_t>(_M_rng()) - __rngmin;
+      if (__rngrange == std::numeric_limits<uint64_t>::max()) return __result;
+ // One value is not enough.
+ const int __bits_per_random_number = _Ceil_log2<__rngrange>::_S_value;
+ for (int __done = __bits_per_random_number; __done < 64;
+ __done += __bits_per_random_number) {
+ __result <<= __bits_per_random_number;
+        __result += static_cast<uint64_t>(_M_rng()) - __rngmin;
+ }
+ return __result;
+ }
+
+ // Compute a random number.
+ size_type
+ _M_rand_at_most(size_type __max)
+ {
+      assert(sizeof(__max) <= 8 || __max <= std::numeric_limits<uint64_t>::max());
+ if (__max == _M_rng.max()) return _M_rng();
+      if (sizeof(__max) >= 8 && __max == std::numeric_limits<uint64_t>::max()) {
+ return _M_rand_uint64();
+ }
+ // Possible optimization: consume fewer random bits here.
+ return _M_rand_uint64() % (__max + 1);
+ }
+
+ size_type
+ _M_get_random_odd_number() const
+ { return _M_random_odd_number; }
+
+ private:
+ _RNG _M_rng;
+    size_type _M_random_odd_number; // This is modified only when _M_rng is seeded.
+ };
+} // namespace cuckoo_internal
+
+// The _Cuckoo class for hashed associative containers.
+// _Value: what is stored in the table. E.g., for maps, this is a pair.
+// _Key: something in a 1-to-1 correspondence to a Value, that can be used
+// to search for a Value in the table (find() takes a Key).
+// _Hasher: hash function(s)
+// _Extract_key: given a Value, returns the unique Key associated with it.
+// Must have a result_type typedef indicating the return type
+// of operator().
+// _Equal_key: given two Keys, are they equal?
+// _Alloc: some memory allocations are performed via this.
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo;
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo_const_iterator;
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo_iterator
+ {
+ private:
+ typedef typename _Alloc::template rebind<_Value>::other value_alloc_type;
+ typedef _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc,
+ _S_z> _Table;
+ typedef typename _Table::_Bucket_number _Bucket_number;
+ typedef typename _Table::_Slot _Slot;
+ friend class _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+
+ public:
+ typedef _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>
+ iterator;
+ typedef _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_iterator;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Value value_type;
+ typedef typename value_alloc_type::difference_type difference_type;
+ typedef typename value_alloc_type::size_type size_type;
+ typedef typename value_alloc_type::const_reference reference;
+ typedef typename value_alloc_type::const_pointer pointer;
+
+ // Constructors
+ private:
+ _Cuckoo_iterator(const _Table* __h, _Bucket_number __pos, bool __advance)
+ {
+ _M_ht = const_cast<_Table*>(__h);
+ _M_b = __pos;
+ if (__advance)
+ {
+ _M_advance_past_empty();
+ }
+ else
+ {
+ assert(_M_b < _M_ht->bucket_count());
+ _M_val = const_cast<_Value*>(_M_ht->_M_slots[_M_b].get_ptr());
+ }
+ }
+
+ public:
+ _Cuckoo_iterator() : _M_val(nullptr), _M_ht(nullptr), _M_b(0) { }
+
+ // The default destructor is fine; we don't define one
+ // The default operator= is fine; we don't define one
+
+ // Dereference
+ reference
+ operator*() const
+ { return *_M_val; }
+
+ pointer
+ operator->() const
+ { return &(operator*()); }
+
+ // Move past empty slots and bookkeeping slots.
+ void
+ _M_advance_past_empty()
+ {
+ const _Bucket_number __n = _M_ht->bucket_count();
+ while (_M_b < __n && _M_ht->_M_slots[_M_b].has_no_element())
+ {
+ ++_M_b;
+ }
+ _M_val = const_cast<_Value*>(_M_b < __n ? _M_ht->_M_slots[_M_b].get_ptr()
+ : nullptr);
+ }
+
+ iterator&
+ operator++()
+ {
+ _M_update();
+ ++_M_b;
+ _M_advance_past_empty();
+ return *this;
+ }
+
+ iterator
+ operator++(int)
+ {
+ iterator __tmp(*this);
+ ++*this;
+ return __tmp;
+ }
+
+ // Comparison.
+ bool
+ operator==(const iterator& __it) const
+ { return _M_val == __it._M_val; }
+
+ bool
+ operator!=(const iterator& __it) const
+ { return _M_val != __it._M_val; }
+
+ private:
+ // Conversion of const_iterator to iterator. This is private because it is
+ // intended for internal use only.
+ _Cuckoo_iterator(const const_iterator& __it)
+ : _M_val(const_cast<_Value*>(__it._M_val)),
+ _M_ht(const_cast<_Table*>(__it._M_ht)),
+ _M_b(__it._M_b)
+ { }
+
+ void
+ _M_update()
+ {
+ _M_b = _M_ht->_M_mod_bucket_count(_M_b);
+ if (_M_ht->_M_slots[_M_b].get_ptr_raw()
+          != reinterpret_cast<uintptr_t>(_M_val))
+ {
+ // _M_b is stale. Use _M_val to find correct bucket number.
+ _M_b = _M_ht->find(_M_ht->get_key(*_M_val))._M_b;
+ assert(_M_val == _M_ht->_M_slots[_M_b].get_ptr());
+ }
+ }
+
+ _Value* _M_val;
+ _Table* _M_ht;
+ _Bucket_number _M_b;
+ };
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ struct _Cuckoo_const_iterator
+ {
+ private:
+ typedef typename _Alloc::template rebind<_Value>::other value_alloc_type;
+ typedef _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc,
+ _S_z> _Table;
+ typedef typename _Table::_Bucket_number _Bucket_number;
+ typedef typename _Table::_Slot _Slot;
+ friend class _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+
+ public:
+ typedef _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>
+ iterator;
+ typedef _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_iterator;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Value value_type;
+ typedef typename value_alloc_type::difference_type difference_type;
+ typedef typename value_alloc_type::size_type size_type;
+ typedef typename value_alloc_type::const_reference reference;
+ typedef typename value_alloc_type::const_pointer pointer;
+
+ private:
+ _Cuckoo_const_iterator(const _Table* __h,
+ _Bucket_number __pos,
+ bool __advance)
+ {
+ _M_ht = const_cast<_Table*>(__h);
+ _M_b = __pos;
+ if (__advance)
+ {
+ _M_advance_past_empty();
+ }
+ else
+ {
+ assert(_M_b < _M_ht->bucket_count());
+ _M_val = _M_ht->_M_slots[_M_b].get_ptr();
+ }
+ }
+
+ public:
+ _Cuckoo_const_iterator() : _M_val(nullptr), _M_ht(nullptr), _M_b(0) { }
+
+ // This lets us convert regular iterators to const iterators
+ _Cuckoo_const_iterator(const iterator& __it)
+ : _M_val(__it._M_val), _M_ht(__it._M_ht), _M_b(__it._M_b)
+ { }
+
+ reference
+ operator*() const
+ { return *_M_val; }
+
+ pointer
+ operator->() const
+ { return &(operator*()); }
+
+ // Advance _M_b zero or more times, if necessary. Set _M_val when done.
+ void
+ _M_advance_past_empty()
+ {
+ const _Bucket_number __n = _M_ht->bucket_count();
+ while (_M_b < __n && _M_ht->_M_slots[_M_b].has_no_element())
+ {
+ ++_M_b;
+ }
+ _M_val = _M_b < __n ? _M_ht->_M_slots[_M_b].get_ptr() : nullptr;
+ }
+
+ const_iterator&
+ operator++()
+ {
+ _M_update();
+ ++_M_b;
+ _M_advance_past_empty();
+ return *this;
+ }
+
+ const_iterator
+ operator++(int)
+ {
+ const_iterator __tmp(*this);
+ ++*this;
+ return __tmp;
+ }
+
+ // Comparison.
+ bool
+ operator==(const const_iterator& __it) const
+ { return _M_val == __it._M_val; }
+
+ bool
+ operator!=(const const_iterator& __it) const
+ { return _M_val != __it._M_val; }
+
+ private:
+ void
+ _M_update()
+ {
+ _M_b = _M_ht->_M_mod_bucket_count(_M_b);
+ if (_M_ht->_M_slots[_M_b].get_ptr_raw()
+          != reinterpret_cast<uintptr_t>(_M_val))
+ {
+ // _M_b is stale. Use _M_val to find correct bucket number.
+ _M_b = _M_ht->find(_M_ht->get_key(*_M_val))._M_b;
+ assert(_M_val == _M_ht->_M_slots[_M_b].get_ptr());
+ }
+ }
+
+ private:
+ const _Value* _M_val;
+ const _Table* _M_ht;
+ _Bucket_number _M_b;
+ };
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo_const_local_iterator;
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo_local_iterator
+ {
+ private:
+ typedef typename _Alloc::template rebind<_Value>::other value_alloc_type;
+ friend class _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+ friend class _Cuckoo_const_local_iterator<_Value, _Key, _Hasher,
+ _Extract_key, _Equal_key, _Alloc, _S_z>;
+
+ public:
+ typedef _Cuckoo_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ local_iterator;
+ typedef _Cuckoo_const_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_local_iterator;
+ typedef _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>
+ iterator;
+ typedef _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_iterator;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Value value_type;
+ typedef typename value_alloc_type::difference_type difference_type;
+ typedef typename value_alloc_type::size_type size_type;
+ typedef typename value_alloc_type::const_reference reference;
+ typedef typename value_alloc_type::const_pointer pointer;
+
+ // Constructors
+ private:
+ explicit _Cuckoo_local_iterator(iterator __i) : _M_i(__i) { }
+
+ public:
+ _Cuckoo_local_iterator() { }
+
+ // Dereference
+ reference
+ operator*() const
+ { return *_M_i; }
+
+ pointer
+ operator->() const
+ { return &(operator*()); }
+
+ local_iterator&
+ operator++()
+ {
+ ++_M_i;
+ return *this;
+ }
+
+ local_iterator
+ operator++(int)
+ {
+ local_iterator __tmp(*this);
+ ++*this;
+ return __tmp;
+ }
+
+ // Comparison.
+ bool
+ operator==(const local_iterator& __it) const
+ { return _M_i == __it._M_i; }
+
+ bool
+ operator!=(const local_iterator& __it) const
+ { return _M_i != __it._M_i; }
+
+ private:
+    // Conversion of const_local_iterator to local_iterator.  This is
+    // private because it is intended for internal use only.
+ _Cuckoo_local_iterator(const const_local_iterator& __it) : _M_i(__it._M_i)
+ { }
+
+ iterator _M_i;
+ };
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ struct _Cuckoo_const_local_iterator
+ {
+ private:
+ typedef typename _Alloc::template rebind<_Value>::other value_alloc_type;
+ friend class _Cuckoo<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+ friend class _Cuckoo_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+
+ public:
+ typedef _Cuckoo_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ local_iterator;
+ typedef _Cuckoo_const_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_local_iterator;
+ typedef _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>
+ iterator;
+ typedef _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>
+ const_iterator;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Value value_type;
+ typedef typename value_alloc_type::difference_type difference_type;
+ typedef typename value_alloc_type::size_type size_type;
+ typedef typename value_alloc_type::const_reference reference;
+ typedef typename value_alloc_type::const_pointer pointer;
+
+ private:
+ _Cuckoo_const_local_iterator(const iterator& __it) : _M_i(__it) { }
+
+ public:
+ _Cuckoo_const_local_iterator() { }
+
+ // This lets us convert regular local iterators to const local iterators
+ _Cuckoo_const_local_iterator(const local_iterator& __it) : _M_i(__it._M_i)
+ { }
+
+ reference
+ operator*() const
+ { return *_M_i; }
+
+ pointer
+ operator->() const
+ { return &(operator*()); }
+
+ const_local_iterator&
+ operator++()
+ {
+ ++_M_i;
+ return *this;
+ }
+
+ const_local_iterator
+ operator++(int)
+ {
+ const_local_iterator __tmp(*this);
+ ++*this;
+ return __tmp;
+ }
+
+ // Comparison.
+ bool
+ operator==(const const_local_iterator& __it) const
+ { return _M_i == __it._M_i; }
+
+ bool
+ operator!=(const const_local_iterator& __it) const
+ { return _M_i != __it._M_i; }
+
+ private:
+ const_iterator _M_i;
+ };
+
+template<typename _Value, typename _Key, typename _Hasher,
+         typename _Extract_key, typename _Equal_key, typename _Alloc,
+         size_t _S_z>
+ class _Cuckoo
+ {
+ private:
+ enum
+ {
+ _S_random_hunt_max_iters = 10
+ };
+#ifndef NDEBUG
+ enum
+ {
+ _S_debug = true
+ };
+#else
+ enum
+ {
+ _S_debug = false
+ };
+#endif
+ enum
+ {
+ _S_easy = true,
+ _S_hard = false
+ };
+ enum
+ {
+ _S_copy = true,
+ _S_no_copy = false
+ };
+ enum
+ {
+ _S_delete = true,
+ _S_no_delete = false
+ };
+ enum
+ {
+ _S_advance = true,
+ _S_no_advance = false
+ };
+ enum
+ {
+ _S_did_insert = true,
+ _S_did_not_insert = false
+ };
+#if __cpp_exceptions
+ enum
+ {
+ _S_exceptions_enabled = true
+ };
+#else
+ enum
+ {
+ _S_exceptions_enabled = false
+ };
+#endif
+ enum
+ {
+ _S_hard_if_exceptions_enabled = _S_exceptions_enabled ? _S_hard : _S_easy
+ };
+
+ typedef typename _Alloc::template rebind<_Value>::other value_alloc_type;
+
+ public:
+ typedef _Key key_type;
+ typedef _Value value_type;
+ typedef _Hasher hasher;
+ typedef _Equal_key key_equal;
+ typedef _Alloc allocator_type;
+
+ typedef typename value_alloc_type::size_type size_type;
+ typedef typename value_alloc_type::difference_type difference_type;
+ typedef typename value_alloc_type::reference reference;
+ typedef typename value_alloc_type::const_reference const_reference;
+ typedef typename value_alloc_type::pointer pointer;
+ typedef typename value_alloc_type::const_pointer const_pointer;
+ typedef cuckoo_internal::_Bucket_number _Bucket_number;
+ typedef cuckoo_internal::_RNGState _RNGState;
+ typedef _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc, _S_z>
+ iterator;
+ typedef _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc,
+ _S_z> const_iterator;
+ typedef _Cuckoo_local_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc,
+ _S_z> local_iterator;
+ typedef _Cuckoo_const_local_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z> const_local_iterator;
+
+ private:
+ struct _Resize_request { }; // for "throw _Resize_request()"
+ typedef cuckoo_internal::_Permutation_iterator _Permutation_iterator;
+ typedef uintptr_t hint;
+ friend class _Cuckoo_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key, _Alloc,
+ _S_z>;
+ friend class _Cuckoo_const_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_local_iterator<_Value, _Key, _Hasher, _Extract_key, _Equal_key,
+ _Alloc, _S_z>;
+ friend class _Cuckoo_const_local_iterator<_Value, _Key, _Hasher, _Extract_key,
+ _Equal_key, _Alloc, _S_z>;
+
+ // Helper to keep track of changes to _M_slots.
+ // Handy for implementing commit-or-rollback semantics.
+ struct _Rollback_state {
+ struct Record {
+ uintptr_t* ptr;
+ uintptr_t old_value;
+ };
+
+ // TODO: use allocator?
+      typedef std::vector<Record> Records;
+
+ void push_back(uintptr_t& u) { v_.push_back(Record{&u, u}); }
+ typename Records::size_type size() const { return v_.size(); }
+ Record& at(typename Records::size_type index) { return v_.at(index); }
+
+ Records v_;
+      std::vector<const_pointer> _M_vals_created;
+
+      template <typename _V> void _M_rollbackable_write(_V& ref, _V val) {
+ static_assert(sizeof(val) == sizeof(uintptr_t), "sizeof(val) assumption");
+ uintptr_t u;
+ memcpy(&u, &ref, sizeof(val));
+        v_.push_back(Record{reinterpret_cast<uintptr_t*>(&ref), u});
+ ref = val;
+ }
+
+ size_type _M_num_elements;
+ size_type _M_num_non_element_slots;
+ };
+
+ _Rollback_state* _M_get_rollback_state() {
+#if __cpp_exceptions
+ return _M_rollback_state;
+#else
+ return nullptr;
+#endif
+ }
+
+ void _M_clear_rollback_state() {
+#if __cpp_exceptions
+ _M_rollback_state = nullptr;
+#endif
+ }
+
+    template <typename _V> void _M_rollbackable_write(_V& ref, _V val) {
+ if (_M_get_rollback_state() != nullptr) {
+ _M_get_rollback_state()->_M_rollbackable_write(ref, val);
+ } else {
+ ref = val;
+ }
+ }
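+
+    // Illustrative note: a bump during insertion overwrites slot words via
+    // _M_rollbackable_write, so in the "hard" state each old word is
+    // recorded first and a subsequent throw can be answered by restoring
+    // the records in _Rollback_state::v_ in reverse order.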
+
+ // Conceptually these are the key/value pairs for a map: g1 and g2
+ // are the "key" and the rest is the "value." See above for details.
+ struct _Extension
+ {
+ size_type g1;
+ size_type g2;
+ // To use i0 and i1 we need to create a _Permutation_iterator of a specific
+ // length; the length is determined by whether this is a first Extension
+ // for g1, g2, or a second, or a third, etc. The code traversing the chain
+ // keeps track, so we don't need to record that here.
+ _Bucket_number i0;
+ _Bucket_number i1;
+
+ bool Matches(size_type x1, size_type x2) const {
+ return x1 == g1 && x2 == g2;
+ }
+ };
+
+ // Extensions are linked into two separate linked lists. One is the
+ // list of all Extensions that we keep for bookkeeping purposes. The other is
+ // the list of Extensions for a particular bucket.
+ struct _Extension_list
+ {
+ _Extension_list* _M_next; // Bookkeeping list of all Extensions
+ _Extension_list* _M_next_in_chain; // Per-bucket list of Extensions
+ _Extension _M_extension;
+ };
+
+ struct _Slot
+ {
+ enum
+ {
+ _S_bitset_capacity = 7,
+ _S_max_bitset = (1 << _S_bitset_capacity) - 1,
+ _S_bits = _S_bitset_capacity + 3,
+ _S_width = sizeof(const_pointer) * 8, // Size of a pointer, in bits.
+        _S_top_mask = ((static_cast<uintptr_t>(1) << _S_bits) - 1) << (_S_width - _S_bits),
+ _S_bitset_mask = _S_top_mask << 3,
+ _S_ptr_mask = ~_S_top_mask,
+ // The next three masks are powers of two, i.e., all bits zero except one.
+ _S_contains_extension_list_mask = _S_top_mask ^ (_S_top_mask << 1),
+ _S_invalid_bitset_mask = _S_contains_extension_list_mask << 2,
+ // Which bits specify the type of pointer in the _Slot?
+ _S_pointer_type_mask = _S_contains_extension_list_mask
+ };
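+
+      // Resulting word layout, most significant bit first (W = _S_width;
+      // an illustrative summary of the masks above):
+      //   [bitset: 7][invalid-bitset flag: 1][unused: 1]
+      //   [extension-list flag: 1][low W-10 bits of the pointer: W-10]
+      // get_ptr_raw() recovers the full pointer by sign-extending.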
+
+ _Slot() : p_(0) { }
+
+ uintptr_t get_ptr_raw() const {
+#if defined(__x86_64__)
+ uintptr_t p = p_ << _S_bits;
+ asm("sar %1, %0"
+ : "+r"(p)
+ : "i"(_S_bits)
+ : "cc");
+ return p;
+#else
+ assert(0 && "not implemented");
+ return 0;
+#endif
+ }
+
+ const_pointer get_ptr() const {
+ assert(can_point_to_element());
+      return reinterpret_cast<const_pointer>(get_ptr_raw());
+ }
+
+ bool can_point_to_element() const { return (p_ & _S_pointer_type_mask) == 0; }
+ bool is_overfull() const { return !has_bitset(); }
+ bool contains_extension_list() const {
+ return (p_ & _S_contains_extension_list_mask) != 0;
+ }
+ bool has_bitset() const {
+ return (p_ & _S_invalid_bitset_mask) == 0;
+ }
+
+ hint get_bitset() const {
+ assert(has_bitset());
+ hint h = p_ >> (_S_width - _S_bitset_capacity);
+ return h;
+ }
+
+ void set_bitset(hint h) {
+ assert(has_bitset());
+ p_ <<= _S_bitset_capacity;
+ p_ |= h;
+ p_ = bitwise_right_rotate(p_, _S_bitset_capacity);
+ assert(get_bitset() == h);
+ }
+
+ void clear_bitset(bool easy, _Rollback_state* rs) {
+ assert(has_bitset());
+ if (!easy) {
+ rs->push_back(p_);
+ }
+ p_ &= ~_S_bitset_mask;
+ assert(get_bitset() == 0);
+ }
+
+ void set_overfull(bool easy, _Rollback_state* rs) {
+ if (!easy) {
+ rs->push_back(p_);
+ }
+ p_ |= _S_invalid_bitset_mask;
+ assert(is_overfull());
+ }
+
+ void xor_bitset(hint t, bool easy, _Rollback_state* rs) {
+ if (!easy) {
+ rs->push_back(p_);
+ }
+ const hint old_bitset = get_bitset();
+ set_bitset(old_bitset ^ t);
+      assert(get_bitset() == (old_bitset ^ t));
+ }
+
+ bool is_ptr_null() const {
+ bool result = (p_ & _S_ptr_mask) == 0;
+ assert(result == (get_ptr_raw() == 0));
+ return result;
+ }
+
+ bool is_available() const {
+ bool result = (p_ & (_S_ptr_mask | _S_pointer_type_mask)) == 0;
+ assert(result == (is_ptr_null() && can_point_to_element()));
+ return result;
+ }
+
+ bool has_element() const {
+ return can_point_to_element() && !is_ptr_null();
+ }
+
+ bool has_no_element() const {
+ return !has_element();
+ }
+
+ void set_ptr_that_was_null(const_pointer p, bool easy, _Rollback_state* rs) {
+ assert(get_ptr() == nullptr);
+ if (!easy) {
+ rs->push_back(p_);
+ }
+      p_ |= reinterpret_cast<uintptr_t>(p) & _S_ptr_mask;
+ assert(get_ptr() == p);
+ }
+
+ void set_ptr_and_bitset(const_pointer p, hint bitset, bool easy,
+ _Rollback_state* rs) {
+ if (!easy) {
+ rs->push_back(p_);
+ }
+      p_ = (reinterpret_cast<uintptr_t>(p) & _S_ptr_mask) |
+ (bitset << (_S_width - _S_bitset_capacity));
+ assert(get_bitset() == bitset && get_ptr() == p);
+ }
+
+ void set_ptr_and_mark_as_extension_list(_Extension_list* p, bool easy,
+ _Rollback_state* rs) {
+ if (!easy) {
+ rs->push_back(p_);
+ }
+ p_ &= ~_S_ptr_mask;
+      p_ |= (reinterpret_cast<uintptr_t>(p) & _S_ptr_mask) |
+ _S_contains_extension_list_mask;
+ assert(get_extension_listptr() == p && !can_point_to_element());
+ }
+
+ void set_ptr_to_null(bool easy, _Rollback_state* rs) {
+ if (!easy) {
+ rs->push_back(p_);
+ }
+ p_ &= ~_S_ptr_mask;
+ assert(get_ptr() == nullptr);
+ }
+
+ _Extension_list* get_extension_listptr() const {
+ return reinterpret_cast<_Extension_list*>(get_ptr_raw());
+ }
+
+ private:
+ static uintptr_t bitwise_right_rotate(uintptr_t val, unsigned int shift) {
+ const int kNumBits = 8 * sizeof(val);
+ return (val >> shift) | (val << (kNumBits - shift));
+ }
+
+ uintptr_t p_;
+ };
+
+ // Utilities for singly-linked lists.
+    template<typename _V>
+ inline void
+ PopListUntilHeadIs(_V* __h, _V** __pointer_to_list)
+ {
+ while (*__pointer_to_list != __h)
+ {
+ auto __next = (*__pointer_to_list)->_M_next;
+ delete *__pointer_to_list;
+ *__pointer_to_list = __next;
+ }
+ }
+
+    template<typename _V>
+ inline void
+ PopAll(_V** __pointer_to_list)
+ { PopListUntilHeadIs(static_cast<_V*>(nullptr), __pointer_to_list); }
+
+ public:
+ // How full we let the table get before we resize, by default.
+ enum { _S_default_max_occupancy_percent = 60 };
+
+ // How empty we let the table get before we resize lower, by default.
+ // (0 means never resize lower.)
+ // It should be less than half of the corresponding upper bound, or
+ // we risk doing a lot of doubling/halving the table size for no good reason.
+ enum { _S_default_consider_shrink_occupancy =
+ _S_default_max_occupancy_percent * 2 / 5 };
+
+ // Minimum size we're willing to let hashtables be. Must be a power of two,
+  // and at least max(_S_z, _Slot::_S_bitset_capacity). Note, however, that for a
+ // given hashtable, the initial size is a function of the first constructor
+ // arg, and may be larger than _S_min_buckets.
+ static const size_type _S_min_buckets =
+ cuckoo_internal::SmallestPowerOfTwoNotLessThan<
+ cuckoo_internal::_Max<_S_z, _Slot::_S_bitset_capacity>::_S_value>::_S_value;
+
+ // By default, if you don't specify a hashtable size at
+ // construction-time, we use this size. Must be a power of two, and
+ // at least _S_min_buckets.
+ static const size_type _S_default_starting_buckets
+ = cuckoo_internal::_Max<32, _S_min_buckets>::_S_value;
+
+ // ITERATOR FUNCTIONS
+ // TODO: should we keep track of what begin() would return, so we don't have
+ // to iterate when begin() is invoked? It seems unhelpful on balance, but may
+ // be required by the C++ standard.
+ iterator begin() noexcept { return iterator(this, 0, _S_advance); }
+ iterator end() noexcept { return iterator(); }
+ const_iterator begin() const noexcept {
+ return const_iterator(this, 0, _S_advance);
+ }
+ const_iterator end() const noexcept { return const_iterator(); }
+ const_iterator cbegin() const noexcept { return begin(); }
+ const_iterator cend() const noexcept { return end(); }
+
+ // LOCAL ITERATORS / BUCKET INTERFACE
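+  // An illustrative summary: the bucket interface is satisfied in a
+  // degenerate way.  bucket() reports every key as living in bucket 0, so
+  // bucket 0's local-iterator range is the whole table and every other
+  // bucket appears empty.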
+ size_type bucket_size(size_type b) const noexcept {
+ return b == 0 ? size() : 0;
+ }
+ local_iterator begin(size_type b) noexcept {
+ return local_iterator(b == 0 ? begin() : end());
+ }
+ local_iterator end(size_type b) noexcept { return local_iterator(end()); }
+ const_local_iterator begin(size_type b) const noexcept {
+ return const_local_iterator(b == 0 ? begin() : end());
+ }
+ const_local_iterator end(size_type b) const noexcept { return const_local_iterator(end()); }
+ const_local_iterator cbegin(size_type n) const noexcept { return begin(n); }
+ const_local_iterator cend(size_type n) const noexcept { return end(n); }
+
+ // ACCESSOR FUNCTIONS for the things we templatize on, basically
+ hasher hash_function() const { return _M_settings; }
+ key_equal key_eq() const { return _M_key_info; }
+ allocator_type get_allocator() const { return allocator_type(_M_value_alloc); }
+
+ // Accessor function for statistics gathering.
+ int num_table_copies() const { return _M_settings._M_num_copies(); }
+
+ // FUNCTIONS CONCERNING SIZE
+  size_type size() const { return _M_num_elements; }
+ bool empty() const { return size() == 0; }
+ size_type bucket_count() const { return _M_num_buckets; }
+ size_type max_bucket_count() const { return max_size(); }
+ size_type max_size() const {
+ return std::min(_M_value_alloc.max_size(),
+                    std::numeric_limits<size_type>::max() / 8);
+ }
+
+ bool allow_rebucketing(bool new_state) {
+ std::swap(new_state, _M_allow_rebucketing);
+ return new_state;
+ }
+ bool allow_shrinking(bool new_state) {
+ std::swap(new_state, _M_allow_shrinking);
+ return new_state;
+ }
+
+ private:
+ enum { _S_impossible_bucket = ~(_Bucket_number)0 };
+
+ size_type _M_random_odd_number() const {
+ return _M_rng_state->_M_get_random_odd_number();
+ }
+ // In non-const context, create the RNG if need be.
+ size_type _M_random_odd_number() {
+ initialize_rng();
+ return _M_rng_state->_M_get_random_odd_number();
+ }
+
+ // "Make it big enough for delta more elements." Also, check if we've hit
+ // _M_work_threshold. If the table size is fine already then this does nothing.
+ // Returns whether a resize did occur.
+ bool _M_resize_delta(size_type delta) {
+ assert(bucket_count() >= _S_min_buckets);
+ if (!_M_allow_rebucketing) {
+ _M_work_since_last_rebuild =
+ std::min(_M_work_threshold, _M_work_since_last_rebuild);
+ return false;
+ }
+ if (_M_num_elements >=
+        (std::numeric_limits<size_type>::max)() - delta) {
+#if __cpp_exceptions
+ throw std::length_error("resize overflow");
+#else
+ assert(0 && "resize overflow");
+#endif
+ }
+
+ size_type min_size_to_consider = _S_min_buckets;
+ // Sometimes, we want to resize just to reduce the probability that
+ // a long sequence of inserts and erases has led to inopportune
+ // clustering of elements in the table. That's tracked by _M_work_threshold.
+ if (_M_work_since_last_rebuild < _M_work_threshold) {
+ if (_M_slots_in_use() + delta <= _M_settings._M_enlarge_threshold) {
+ return false;
+ } else {
+ // We passed the enlarge threshold. Doubling is probably a good idea.
+ min_size_to_consider = bucket_count() * 2;
+ }
+ }
+
+ if (!_M_allow_shrinking) {
+ min_size_to_consider = bucket_count();
+ }
+ size_type resize_to =
+ _M_settings.min_buckets(_M_num_elements + delta, min_size_to_consider);
+
+ force_resize(resize_to);
+ return true;
+ }
+
+ void force_resize(size_type resize_to) {
+ for (;;) {
+ __try {
+ _Cuckoo tmp(*this, resize_to);
+ swap(tmp);
+ return;
+ } __catch (_Resize_request&) {
+ lower_max_load_factor();
+ }
+ }
+ }
+
+ // According to the C++ standard, a load factor is just a suggestion, and
+ // the data structure is allowed to do whatever it likes. So, in certain
+ // unusual cases we unilaterally target a lower load factor.
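+  // Numeric example (illustrative): if the maximum load factor has crept up
+  // to 0.9 while the default is 0.6, one call yields
+  // 0.97 * 0.9 + 0.03 * 0.6 = 0.891; repeated calls decay geometrically
+  // toward the default.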
+ float lower_max_load_factor() {
+ const float default_mlf = _S_default_max_occupancy_percent / 100.0f;
+ float mlf = _M_settings._M_enlarge_factor;
+ mlf = .97f * mlf + .03f * default_mlf;
+ set_resizing_parameters(_M_settings._M_shrink_factor, mlf);
+ _M_settings._M_reset_thresholds(bucket_count());
+ return mlf;
+ }
+
+
+ // Used to actually do the rehashing when we grow/shrink a hashtable.
+ // No new _Values are allocated; the existing ones are reused.
+ void move_from(_Cuckoo &ht, size_type min_buckets_wanted) {
+ clear_to_size(_M_settings.min_buckets(ht.size(), min_buckets_wanted),
+ ht._M_rng_state);
+ size_type num_buckets = bucket_count();
+ {
+ _Disallow_shrink_holder __dsh(&_M_allow_shrinking);
+ for (const_iterator it = ht.begin(); it != ht.end(); ++it) {
+ _M_insert_unique(*it, _S_easy);
+ assert(bucket_count() == num_buckets);
+ }
+ }
+ _Slot* start = ht._M_slots;
+ _Slot* end = start + ht._M_num_buckets;
+ memset(start, 0, (end - start) * sizeof(*start));
+ ht._M_num_elements = 0;
+ ht._M_num_non_element_slots = 0;
+ _M_settings._M_increment_num_copies();
+ }
+
+ void copy_from(const _Cuckoo &ht, size_type min_buckets_wanted) {
+ clear_to_size(_M_settings.min_buckets(ht.size(), min_buckets_wanted),
+ ht._M_rng_state);
+ size_type num_buckets = bucket_count();
+ {
+ _Disallow_shrink_holder __dsh(&_M_allow_shrinking);
+ for (const_iterator it = ht.begin(); it != ht.end(); ++it) {
+ _M_insert_unique(*new_value(*it, _S_easy), _S_easy);
+ assert(bucket_count() >= num_buckets);
+ }
+ }
+ _M_settings._M_increment_num_copies();
+ assert(size() == ht.size());
+ }
+
+ public:
+ // CONSTRUCTORS -- as required by the specs, we take a size,
+ // but also let you specify a hash function, key comparator,
+ // and key extractor. We also define a copy constructor and =.
+ // DESTRUCTOR -- needs to free the table
+ explicit _Cuckoo(size_type expected_max_items_in_table = 0,
+ const _Hasher& hf = _Hasher(), const _Equal_key& eql = _Equal_key(),
+ const _Extract_key& ext = _Extract_key(),
+ const _Alloc& alloc = _Alloc())
+ : _M_slots(nullptr),
+ _M_settings(hf),
+ _M_key_info(ext, eql),
+ _M_num_elements(0),
+ _M_num_non_element_slots(0),
+ _M_num_buckets(
+ expected_max_items_in_table == 0
+ ? _S_default_starting_buckets
+ : _M_settings.min_buckets(expected_max_items_in_table, 0)),
+ _M_value_alloc(alloc),
+ _M_allow_shrinking(true),
+ _M_allow_rebucketing(true),
+ _M_rng_state(nullptr) {
+ _M_clear_rollback_state();
+ clear_to_size(bucket_count());
+ }
+
+ _Cuckoo(const _Cuckoo& ht) : _Cuckoo() { *this = ht; }
+
+ explicit _Cuckoo(const _Alloc& alloc)
+ : _Cuckoo(0, _Hasher(), _Equal_key(), _Extract_key(), alloc)
+ { }
+
+ _Cuckoo(const _Cuckoo& ht, const _Alloc& alloc) : _Cuckoo(alloc) { *this = ht; }
+
+ // Move the elements to a new table. This is used by table resizing and
+ // the move constructor.
+ _Cuckoo(_Cuckoo& ht, size_type min_buckets_wanted = _S_default_starting_buckets)
+ : _M_slots(nullptr),
+ _M_settings(ht._M_settings),
+ _M_key_info(ht._M_key_info),
+ _M_num_elements(0),
+ _M_num_non_element_slots(0),
+ _M_num_buckets(0),
+ _M_value_alloc(ht._M_value_alloc),
+ _M_allow_shrinking(true),
+ _M_allow_rebucketing(true),
+ _M_rng_state(nullptr)
+ {
+ _M_clear_rollback_state();
+ __try
+ {
+ move_from(ht, min_buckets_wanted);
+ }
+ __catch (...)
+ {
+ cleanup(_S_no_delete);
+ __throw_exception_again;
+ }
+ }
+
+ _Cuckoo(_Cuckoo&& ht) : _Cuckoo(ht, ht.bucket_count()) { }
+
+ _Cuckoo&
+ operator=(const _Cuckoo& ht)
+ {
+ if (&ht == this) return *this; // Don't copy onto ourselves.
+ cleanup(_S_delete);
+ _M_slots = nullptr;
+ _M_settings = ht._M_settings;
+ _M_key_info = ht._M_key_info;
+ _M_num_elements = 0;
+ _M_num_non_element_slots = 0;
+ _M_work_since_last_rebuild = 0;
+ _M_work_threshold = 0;
+ assert(_M_get_rollback_state() == nullptr);
+ _M_extension_list = nullptr;
+ // TODO: copy the allocator if need be
+ _M_allow_shrinking = ht._M_allow_shrinking;
+ _M_allow_rebucketing = ht._M_allow_rebucketing;
+ _M_rng_state = nullptr;
+ copy_from(ht, _S_min_buckets);
+ return *this;
+ }
+
+ _Cuckoo&
+  operator=(std::initializer_list<value_type> il)
+ {
+ clear();
+ insert(il);
+ return *this;
+ }
+
+ ~_Cuckoo() { cleanup(_S_delete); }
+
+ private:
+ size_type
+ _M_slots_in_use() const
+ { return _M_num_elements + _M_num_non_element_slots; }
+
+ // This is intended for debug-mode sanity checks only.
+ size_type
+ count_non_element_slots() const
+ {
+ size_type __count = 0;
+ for (size_type __i = 0; __i < _M_num_buckets; __i++) {
+ if (_M_slots[__i].contains_extension_list()) {
+ assert(_M_slots[__i].get_extension_listptr() != nullptr);
+ ++__count;
+ }
+ }
+ return __count;
+ }
+
+ void cleanup(bool del) {
+ assert(_M_slots != nullptr);
+ //TODO: use allocator to delete _Values
+ //TODO: use allocator to delete _M_slots etc.
+ if (del) {
+ for (const_reference r : *this) {
+ _M_delete_value(r);
+ }
+ }
+ assert(_M_num_non_element_slots == count_non_element_slots());
+ delete[] _M_slots;
+ destroy_bookkeeping_data();
+ delete _M_rng_state;
+ }
+
+ public:
+ void swap(_Cuckoo& ht) noexcept {
+ if (this == &ht) return;
+ assert(_M_get_rollback_state() == nullptr);
+ assert(ht._M_get_rollback_state() == nullptr);
+ std::swap(_M_slots, ht._M_slots);
+ std::swap(_M_settings, ht._M_settings);
+ std::swap(_M_key_info, ht._M_key_info);
+ std::swap(_M_num_elements, ht._M_num_elements);
+ std::swap(_M_num_non_element_slots, ht._M_num_non_element_slots);
+ std::swap(_M_num_buckets, ht._M_num_buckets);
+ std::swap(_M_work_since_last_rebuild, ht._M_work_since_last_rebuild);
+ std::swap(_M_work_threshold, ht._M_work_threshold);
+ std::swap(_M_extension_list, ht._M_extension_list);
+ std::swap(_M_rng_state, ht._M_rng_state);
+ _M_settings._M_reset_thresholds(bucket_count());
+ ht._M_settings._M_reset_thresholds(ht.bucket_count());
+ // TODO: swap allocator if need be
+ }
+
+ // If hint > _M_num_buckets then force a resize, even if
+ // _M_allow_rebucketing is false.
+ void rehash(size_type hint) {
+ hint = std::min(hint, max_size());
+ if (hint <= _M_num_buckets) return;
+ size_type n = _M_num_buckets;
+ do {
+ n *= 2;
+ } while (n < hint);
+ force_resize(n);
+ }
+
+ void reserve(size_type hint) { rehash(ceil(hint / max_load_factor())); }
+
+ // Get and change the value of shrink_factor and enlarge_factor. The
+ // description at the beginning of this file explains how to choose
+ // the values. Setting the shrink parameter to 0.0 ensures that the
+ // table never shrinks.
+ void get_resizing_parameters(float* shrink, float* grow) const {
+ *shrink = _M_settings._M_shrink_factor;
+ *grow = _M_settings._M_enlarge_factor;
+ }
+ void set_resizing_parameters(float shrink, float grow) {
+ _M_settings.set_resizing_parameters(shrink, grow);
+ _M_settings._M_reset_thresholds(bucket_count());
+ }
+
+ /// Returns the average number of elements per bucket.
+ float
+ load_factor() const noexcept
+ { return size() * 1.0f / bucket_count(); }
+
+  /// Returns a positive number that the %_Cuckoo tries to keep the
+ /// load factor less than or equal to.
+ float
+ max_load_factor() const noexcept
+ {
+ float shrink, grow;
+ get_resizing_parameters(&shrink, &grow);
+ return grow;
+ }
+
+ /**
+   *  @brief  Change the %_Cuckoo maximum load factor.
+ * @param __z The new maximum load factor.
+ */
+ void
+ max_load_factor(float __z)
+ {
+ float shrink, grow;
+ get_resizing_parameters(&shrink, &grow);
+ set_resizing_parameters(shrink, __z);
+ }
+
+ void set_random_seed(const std::seed_seq& s) {
+ assert(empty());
+ if (_M_extension_list != nullptr) {
+ clear();
+ }
+ assert(_M_extension_list == nullptr);
+ if (_M_rng_state == nullptr) {
+ _M_rng_state = new _RNGState(s);
+ } else {
+ _M_rng_state->_M_reset(s);
+ }
+ }
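+ // Usage sketch: the table must be empty when reseeding, e.g.
+ // std::seed_seq __seq{1u, 2u, 3u};
+ // __table.set_random_seed(__seq); // deterministic behavior for debugging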
+
+ private:
+ // There are 2 states:
+ // 1. we have not yet used a RNG, and we have no seed
+ // 2. we have a RNG
+ // Sometimes no seed is given so we need to come up with a seed
+ // somehow. This is done via std::random_device. Unfortunately,
+ // that can throw an exception. However, std::random_device is
+ // avoided if either this table is derived from one that already has
+ // an RNG or set_random_seed() was invoked.
+ void initialize_rng(_RNGState* r = nullptr) {
+ if (_M_rng_state != nullptr) return;
+ if (r != nullptr) {
+ _M_rng_state = new _RNGState(*r);
+ } else {
+ std::random_device rd;
+ _M_rng_state = new _RNGState(std::seed_seq{rd(), rd()});
+ }
+ }
+
+ size_type _M_rand_at_most(size_type max) {
+ initialize_rng();
+ return _M_rng_state->_M_rand_at_most(max);
+ }
+
+ void clear_to_size(size_type new_num_buckets, _RNGState* r = nullptr) {
+ init_bookkeeping_data();
+ assert(_M_slots == nullptr);
+ _M_num_buckets = new_num_buckets;
+ _M_slots = new _Slot[_M_num_buckets];
+ _M_num_elements = 0;
+ clear_with_existing_slots(false);
+ initialize_rng(r);
+ }
+
+ void clear_with_existing_slots(bool resize_to_minimum) {
+ assert(_M_slots != nullptr);
+ if (_M_num_elements > 0) {
+ for (const_reference r : *this) {
+ _M_delete_value(r);
+ }
+ _M_num_elements = 0;
+ }
+ destroy_bookkeeping_data();
+ if (_M_allow_shrinking && _M_allow_rebucketing && resize_to_minimum &&
+ _M_num_buckets != _S_min_buckets) {
+ delete[] _M_slots;
+ _M_num_buckets = _S_min_buckets;
+ _M_slots = new _Slot[_M_num_buckets];
+ }
+ fill_range_with_empty(_M_slots, _M_slots + _M_num_buckets);
+ _M_work_since_last_rebuild = 0;
+ _M_work_threshold = 5 * _M_num_buckets; // why 5?
+ _M_settings._M_reset_thresholds(bucket_count());
+ }
+
+ void fill_range_with_empty(_Slot* start, _Slot* end) {
+ memset(start, 0, (end - start) * sizeof(*start));
+ }
+
+ public:
+ void clear() {
+ clear_with_existing_slots(true);
+ }
+
+ void clear_no_resize() {
+ clear_with_existing_slots(false);
+ }
+
+ // LOOKUP ROUTINES
+ iterator find(const key_type& key) {
+ const_iterator it = const_cast<const _Cuckoo*>(this)->find(key);
+ return iterator(it);
+ }
+
+ const_iterator find(const key_type& __key) const {
+ _Find_result __fr = _M_find_if_present(__key);
+ return __fr._M_found ? const_iterator(this, __fr._M_b, _S_no_advance) : cend();
+ }
+
+ // Bucket numbering is not meaningful for this table; provided for
+ // interface compatibility.
+ size_type bucket(const key_type&) const { return 0; }
+
+ // Counts how many elements match the given key: 0 or 1.
+ size_type count(const key_type &key) const {
+ return _M_find_if_present(key)._M_found ? 1 : 0;
+ }
+
+ std::pair<iterator, iterator> equal_range(const key_type& key) {
+ iterator pos = find(key);
+ if (pos == end()) {
+ return std::pair<iterator, iterator>(pos, pos);
+ } else {
+ const iterator startpos = pos++;
+ return std::pair<iterator, iterator>(startpos, pos);
+ }
+ }
+ std::pair<const_iterator, const_iterator> equal_range(const key_type& key)
+ const {
+ const_iterator pos = find(key);
+ if (pos == end()) {
+ return std::pair<const_iterator, const_iterator>(pos, pos);
+ } else {
+ const const_iterator startpos = pos++;
+ return std::pair<const_iterator, const_iterator>(startpos, pos);
+ }
+ }
+
+ private:
+ // This struct is used only as the return type of _M_find_if_present().
+ struct _Find_result {
+ // If the key was found then:
+ // _M_found is true, _M_b is the bucket where it was found, and _M_bitset is 0.
+ // Otherwise:
+ // _M_found is false, _M_b is the preferred bucket for the key, and _M_bitset
+ // is the bitset for that bucket, or -1 if it's overfull.
+ static _Find_result _S_found(size_type __g0, _Bucket_number __b) {
+ return {__g0, __b, 0, true};
+ }
+ static _Find_result _S_not_found(size_type __g0, _Bucket_number __b, int __bitset) {
+ return {__g0, __b, __bitset, false};
+ }
+ static _Find_result _S_not_found_overfull(size_type __g0, _Bucket_number __b) {
+ return {__g0, __b, -1, false};
+ }
+
+ size_type _M_g0;
+ _Bucket_number _M_b;
+ int _M_bitset;
+ bool _M_found;
+ };
+
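+ // Illustration of the bitset encoding: if bucket b's bitset is 0b0101,
+ // then bits 0 and 2 are set, so buckets b ^ 0 and b ^ 2 may hold elements
+ // whose preferred bucket is b; no other bucket needs to be searched.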
+ _Find_result _M_find_if_present(const key_type& key) const {
+ const size_type g0 = _M_hash0(key);
+ const _Bucket_number b = _M_mod_bucket_count(g0);
+ if (_M_slots[b].has_bitset()) {
+ // Common case. We have a bitset telling us where to look.
+ const int bitset = _M_slots[b].get_bitset();
+ for (int lg_t = 0, t = 1; t <= bitset; lg_t++, t *= 2) {
+ if ((bitset & t) != 0) {
+ const _Bucket_number q = b ^ lg_t;
+ if (!_M_slots[q].is_ptr_null()
+ && _M_equals(key, get_key(*_M_slots[q].get_ptr()))) {
+ // Found a match.
+ return _Find_result::_S_found(g0, q);
+ }
+ }
+ }
+ return _Find_result::_S_not_found(g0, b, bitset);
+ } else {
+ // Bucket b is overfull.
+ // Possible nests for obj include, for example, for _S_z=4,
+ // h1, h1 ^ 1, h1 ^ 2, h1 ^ 3, h2, h2 ^ 1, h2 ^ 2, and h2 ^ 3.
+ const size_type g1 = _M_hash1(key, g0);
+ const _Bucket_number h1 = _M_mod_bucket_count(g1);
+ for (_Bucket_number i = 0; i < _S_z; i++) {
+ const _Bucket_number q = h1 ^ i;
+ if (_M_slots[q].has_element()
+ && _M_equals(key, get_key(*_M_slots[q].get_ptr()))) {
+ return _Find_result::_S_found(g0, q);
+ }
+ }
+ const size_type g2 = _M_hash2(key, g0);
+ const _Bucket_number h2 = _M_mod_bucket_count(g2);
+ for (_Bucket_number i = 0; i < _S_z; i++) {
+ const _Bucket_number q = h2 ^ i;
+ if (_M_slots[q].has_element()
+ && _M_equals(key, get_key(*_M_slots[q].get_ptr()))) {
+ return _Find_result::_S_found(g0, q);
+ }
+ }
+ // Check Extensions, if any.
+ auto p = extensions_if_any(g1, g2);
+ if (p.found_any) { // There's at least one relevant extension.
+ const _Extension_list* el = p.el;
+ const _Extension* e = &el->_M_extension;
+ // The first one is a special case: only e->i0 is used.
+ assert(e->Matches(g1, g2));
+ const _Bucket_number q = e->i0;
+ if (_M_slots[q].has_element()
+ && _M_equals(key, get_key(*_M_slots[q].get_ptr()))) {
+ return _Find_result::_S_found(g0, q);
+ }
+ // Search subsequent extensions, if any.
+ size_type num_to_search_per_extension = 2;
+ while ((el = el->_M_next_in_chain) != nullptr) {
+ e = &el->_M_extension;
+ if (!e->Matches(g1, g2)) break;
+ _Permutation_iterator pi(e->i0, e->i1, _M_num_buckets,
+ num_to_search_per_extension);
+ do {
+ const _Bucket_number q = pi._M_next();
+ if (_M_slots[q].has_element()
+ && _M_equals(key, get_key(*_M_slots[q].get_ptr()))) {
+ return _Find_result::_S_found(g0, q);
+ }
+ } while (!pi._M_done());
+ num_to_search_per_extension *= 2;
+ }
+ }
+ // Done searching all possible nests.
+ return _Find_result::_S_not_found_overfull(g0, b);
+ }
+ }
+
+ // INSERTION ROUTINES
+
+ // Uses plain new for now. TODO: use the allocator instead.
+ pointer new_value(const_reference obj, bool easy) {
+ // Make space in _M_get_rollback_state() first, because we only want to
+ // copy obj if that succeeds.
+ pointer* p = _M_extend_rollback_state_for_new_values();
+ pointer result = new _Value(obj);
+ if (_S_exceptions_enabled && !easy) {
+ *p = result;
+ }
+ return result;
+ }
+ void _M_delete_value(const_reference obj) {
+ delete &obj;
+ }
+
+ // The basic find-or-insert method. Requires _M_get_rollback_state() == nullptr.
+ template<typename _V>
+ std::pair<iterator, bool>
+ _M_find_or_insert(_V&& __t, bool __copy)
+ {
+ assert(_M_get_rollback_state() == nullptr);
+ _Find_result __fr = _M_find_if_present(get_key(__t));
+ if (__fr._M_found)
+ {
+ return std::make_pair(_M_iter_at(__fr._M_b), _S_did_not_insert);
+ }
+ const size_type __g0 = __fr._M_g0;
+ const _Bucket_number __b = __fr._M_b;
+ const int __bitset = __fr._M_bitset;
+ const_pointer __p = &__t;
+ if (__copy)
+ {
+ __p = new value_type(std::forward<_V>(__t));
+ }
+ __try
+ {
+ return __bitset >= 0
+ ? _M_insert_unique_with_bitset(*__p, __g0, __b, __bitset, _S_easy)
+ : _M_cuckoo_insert_unique(*__p, __g0, __b, _S_easy);
+ }
+ __catch (...)
+ {
+ if (__copy)
+ {
+ _M_delete_value(*__p);
+ }
+ __throw_exception_again;
+ }
+ }
+
+ // Returns true if it did resize, false otherwise.
+ // Can throw...
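+ // The work counter amortizes rebuild cost: each operation bumps it, and
+ // once it reaches _M_work_threshold (set to about 5 units per bucket in
+ // clear_with_existing_slots()) a resize or rebuild is forced.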
+ bool bump_work_count_and_maybe_resize(bool easy) {
+ ++_M_work_since_last_rebuild;
+ if (_M_work_since_last_rebuild >= _M_work_threshold) {
+ if (!_M_allow_rebucketing) {
+ _M_work_since_last_rebuild = _M_work_threshold;
+ return false;
+ }
+ if (easy) {
+ bool did_resize = _M_resize_delta(0);
+ assert(did_resize);
+ assert(_M_get_rollback_state() == nullptr);
+ return true;
+ } else {
+ cuckoo_internal::ThrowException<_Resize_request>();
+ }
+ }
+ return false;
+ }
+
+ std::pair<iterator, bool>
+ _M_insert_unique_with_bitset(const_reference obj,
+ size_type g0,
+ _Bucket_number b,
+ int bitset,
+ bool easy) {
+ if (bump_work_count_and_maybe_resize(easy)) {
+ return _M_insert_unique(obj, _S_easy);
+ }
+ for (int lg_t = 0, t = 1; t <= _Slot::_S_max_bitset; lg_t++, t *= 2) {
+ if ((bitset & t) == 0) {
+ const _Bucket_number q = b ^ lg_t;
+ if (_M_slots[q].is_ptr_null()) {
+ if (lg_t == 0) {
+ assert(q == b);
+ _M_slots[q].set_ptr_and_bitset(&obj, bitset | t,
+ easy, _M_get_rollback_state());
+ } else {
+ _M_slots[q].set_ptr_that_was_null(&obj, easy,
+ _M_get_rollback_state());
+ _M_slots[b].xor_bitset(t, easy, _M_get_rollback_state());
+ }
+ _M_increment_num_elements();
+ return std::make_pair(_M_iter_at(q), _S_did_insert);
+ }
+ }
+ }
+ // No good spot is available. We either need to bump something else to
+ // make space, or convert bucket b to overfull. Currently we take the
+ // simplest path: make bucket b overfull, though this policy is not
+ // necessarily optimal.
+ return _M_convert_bucket_to_overfull_and_insert_unique(obj, g0,
+ b, bitset);
+ }
+
+ // This is equivalent to _M_find_or_insert(), above, except that it requires
+ // that the thing we're inserting isn't already present.
+ // Requires count(get_key(obj)) == 0.
+ std::pair<iterator, bool>
+ _M_insert_unique(const_reference __obj, bool __easy)
+ {
+ const size_type __g0 = _M_hash0(get_key(__obj));
+ const _Bucket_number __b = _M_mod_bucket_count(__g0);
+ return _M_slots[__b].has_bitset()
+ ? _M_insert_unique_with_bitset(__obj, __g0, __b, _M_slots[__b].get_bitset(),
+ __easy)
+ : _M_cuckoo_insert_unique(__obj, __g0, __b, __easy);
+ }
+
+ void
+ _M_increment_num_elements()
+ {
+ ++_M_num_elements;
+ if (_M_num_elements >= _M_settings._M_enlarge_threshold)
+ {
+ // Bump up _M_work_since_last_rebuild to force a resize soon.
+ _M_work_since_last_rebuild = _M_work_threshold;
+ }
+ }
+
+ void
+ _M_decrement_num_elements()
+ {
+ --_M_num_elements;
+ if (_M_num_elements < _M_settings._M_shrink_threshold && _M_allow_shrinking)
+ {
+ // Bump up _M_work_since_last_rebuild to force a resize soon.
+ _M_work_since_last_rebuild = _M_work_threshold;
+ }
+ }
+
+ class _Disallow_shrink_holder
+ {
+ public:
+ explicit _Disallow_shrink_holder(bool* __p)
+ : _M_allow_shrinking_ptr(__p), _M_val(*__p)
+ { *__p = false; }
+
+ ~_Disallow_shrink_holder() { *_M_allow_shrinking_ptr = _M_val; }
+
+ private:
+ bool* const _M_allow_shrinking_ptr;
+ const bool _M_val;
+ };
+
+ class _Disallow_rebucket_holder {
+ public:
+ explicit _Disallow_rebucket_holder(bool* __p)
+ : _M_allow_rebucketing_ptr(__p), _M_val(*__p)
+ { *__p = false; }
+
+ ~_Disallow_rebucket_holder() { *_M_allow_rebucketing_ptr = _M_val; }
+
+ private:
+ bool* const _M_allow_rebucketing_ptr;
+ const bool _M_val;
+ };
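+
+ // These guards are used as, e.g.,
+ // { _Disallow_rebucket_holder __g(&_M_allow_rebucketing); ... }
+ // so the previous setting is restored even if an exception propagates.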
+
+ std::pair<iterator, bool>
+ _M_convert_bucket_to_overfull_and_insert_unique(const_reference __obj,
+ size_type __g0,
+ _Bucket_number __b,
+ int __bitset)
+ {
+ assert(_M_slots[__b].get_bitset() == __bitset);
+ // The next two locals track the items we've temporarily removed.
+ size_type __bumped_count = 0;
+ const_pointer __bumped_items[_Slot::_S_bitset_capacity];
+ _Rollback_state __rs;
+ if (_S_exceptions_enabled && _M_get_rollback_state() == nullptr)
+ {
+ init_rollback_state(&__rs);
+ }
+ __try
+ {
+ _Disallow_shrink_holder dsh(&_M_allow_shrinking);
+ for (int __lg_t = 0, __t = 1; __t <= __bitset; __lg_t++, __t *= 2) {
+ if ((__bitset & __t) != 0) {
+ __bumped_items[__bumped_count++] = _M_slots[__b ^ __lg_t].get_ptr();
+ _M_slots[__b ^ __lg_t].set_ptr_to_null(_S_hard_if_exceptions_enabled,
+ _M_get_rollback_state());
+ }
+ }
+ _M_slots[__b].clear_bitset(_S_hard_if_exceptions_enabled,
+ _M_get_rollback_state());
+ _M_num_elements -= __bumped_count;
+ const auto __old_version = _M_settings._M_num_copies();
+ _M_slots[__b].set_overfull(_S_hard_if_exceptions_enabled,
+ _M_get_rollback_state());
+ while (__bumped_count != 0) {
+ const_reference __bumped_obj = *__bumped_items[--__bumped_count];
+ const size_type __bumped_g0 = _M_hash0(get_key(__bumped_obj));
+ assert(__b == _S_impossible_bucket
+ || __b == _M_mod_bucket_count(__bumped_g0));
+ _M_likely__M_cuckoo_insert_unique(__bumped_obj, __bumped_g0, __b,
+ _S_hard_if_exceptions_enabled);
+ auto __version = _M_settings._M_num_copies();
+ __b = __version == __old_version ? __b : _S_impossible_bucket;
+ }
+ auto __result =
+ _M_likely__M_cuckoo_insert_unique(__obj,
+ __g0,
+ __b,
+ _S_hard_if_exceptions_enabled);
+ if (_S_exceptions_enabled && _M_get_rollback_state() == &__rs)
+ {
+ _M_clear_rollback_state();
+ }
+ return __result;
+ }
+ __catch (...)
+ {
+ _M_rollback();
+ __throw_exception_again;
+ }
+ }
+
+ void init_bookkeeping_data() {
+ _M_num_non_element_slots = 0;
+ _M_extension_list = nullptr;
+ }
+
+ void destroy_bookkeeping_data() {
+ PopAll(&_M_extension_list);
+ _M_num_non_element_slots = 0;
+ }
+
+ void init_rollback_state(_Rollback_state *rs) {
+ assert(_S_exceptions_enabled);
+#if __cpp_exceptions
+ assert(_M_get_rollback_state() == nullptr);
+ _M_rollback_state = rs;
+ rs->_M_num_elements = _M_num_elements;
+ rs->_M_num_non_element_slots = _M_num_non_element_slots;
+#endif
+ }
+
+ // Using cuckoo-style insertion, add __obj (or a copy of it) to the table.
+ // If __b == _S_impossible_bucket, this routine is instead equivalent
+ // to _M_insert_unique().
+ // Requires count(get_key(__obj)) == 0 and either
+ // __b == _S_impossible_bucket or __b == _M_preferred_bucket(__obj).
+ std::pair<iterator, bool>
+ _M_likely__M_cuckoo_insert_unique(const_reference __obj,
+ size_type __g0,
+ _Bucket_number __b,
+ bool __easy)
+ {
+ return (__b == _S_impossible_bucket) ? _M_insert_unique(__obj, __easy)
+ : _M_cuckoo_insert_unique(__obj, __g0, __b, __easy);
+ }
+
+ // Using cuckoo-style insertion, add obj to the table.
+ // Requires count(get_key(obj)) == 0.
+ // Requires _M_hash0(get_key(obj)) == g0.
+ // Requires _M_preferred_bucket(obj) == b.
+ std::pair<iterator, bool> _M_cuckoo_insert_unique(const_reference obj,
+ size_type g0,
+ _Bucket_number b,
+ bool easy) {
+ if (bump_work_count_and_maybe_resize(easy)) {
+ return _M_insert_unique(obj, _S_easy);
+ }
+
+ // Possible nests for obj include, for example, for _S_z=4,
+ // h1, h1 ^ 1, h1 ^ 2, h1 ^ 3, h2, h2 ^ 1, h2 ^ 2, and h2 ^ 3.
+ const size_type g1 = _M_hash1(get_key(obj), g0);
+ const _Bucket_number h1 = _M_mod_bucket_count(g1);
+ for (_Bucket_number i = 0; i < _S_z; i++) {
+ if (_M_slots[h1 ^ i].is_available()) {
+ return _M_cuckoo_insert_to_slot(obj, b, h1 ^ i, easy);
+ }
+ }
+ const size_type g2 = _M_hash2(get_key(obj), g0);
+ const _Bucket_number h2 = _M_mod_bucket_count(g2);
+ for (_Bucket_number i = 0; i < _S_z; i++) {
+ if (_M_slots[h2 ^ i].is_available()) {
+ return _M_cuckoo_insert_to_slot(obj, b, h2 ^ i, easy);
+ }
+ }
+ auto p = extensions_if_any(g1, g2);
+ if (p.nodes_examined / 4 > sizeof(bucket_count()) * 8 ||
+ bucket_count() >> (p.nodes_examined / 4) == 0) {
+ // If we iterated through more than about 4 lg b extensions then the table
+ // is getting clogged. Substantially increase _M_work_since_last_rebuild.
+ _M_work_since_last_rebuild += (_M_work_since_last_rebuild >> 4) + 1;
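+ // (Each such clogged insert adds about 1/16 of the current count plus one,
+ // so repeated clogs escalate the counter geometrically toward
+ // _M_work_threshold.)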
+ }
+ if (p.found_any) {
+ // At least one relevant extension exists. Look for an empty nest.
+ _Extension_list* el = p.el;
+ _Extension* first_extension = &el->_M_extension;
+ const _Extension* e = first_extension;
+ // The first one is a special case: only e->i0 is used.
+ assert(e->Matches(g1, g2));
+ if (_M_slots[e->i0].is_available()) {
+ return _M_cuckoo_insert_to_slot(obj, b, e->i0, easy);
+ }
+ size_type num_to_search_per_extension = 2;
+ size_type __num_buckets_searched = 2 * _S_z + 1;
+ // Search subsequent extensions, if any.
+ _Extension_list* last_extension_list_visited = el;
+ while ((el = el->_M_next_in_chain) != nullptr) {
+ e = &el->_M_extension;
+ if (!e->Matches(g1, g2)) break;
+ _Permutation_iterator pi(e->i0, e->i1, _M_num_buckets,
+ num_to_search_per_extension);
+ do {
+ _Bucket_number q = pi._M_next();
+ if (_M_slots[q].is_available()) {
+ return _M_cuckoo_insert_to_slot(obj, b, q, easy);
+ }
+ ++__num_buckets_searched;
+ } while (!pi._M_done());
+ num_to_search_per_extension *= 2;
+ last_extension_list_visited = el;
+ if (num_to_search_per_extension == bucket_count()) {
+ // The whole table was just searched, and there is nowhere to put
+ // another item. Force a resize. But first, lower max_load_factor.
+ lower_max_load_factor();
+ _M_work_since_last_rebuild = _M_work_threshold;
+ bool did_resize = bump_work_count_and_maybe_resize(easy);
+ assert(did_resize);
+ return _M_insert_unique(obj, _S_easy);
+ }
+ }
+ return _M_insert_by_creating_another_extension(
+ obj, last_extension_list_visited, g0, g1, g2, b);
+ }
+ static_assert(0 <= _S_z, "z cannot be negative"); // Usually 0 < _S_z < 10.
+ static_assert(_S_z < 1 << 30, "z is ridiculously large");
+ uint32_t r = _M_rand_at_most(_S_z * 4 - 1);
+ if ((r & 1) == 0) {
+ return _M_insert_by_creating_first_extension(obj, g0, g1, g2, b);
+ }
+ _Bucket_number victim = ((r & 2) == 0 ? h2 : h1) ^ (r >> 2);
+ // Rarely, we may find that _M_slots[victim] is pointing to
+ // something we cannot bump. If that happens, just start over.
+ if (!_M_slots[victim].can_point_to_element()) {
+ return _M_insert_unique(obj, easy);
+ }
+ const_pointer bumped_item = _M_slots[victim].get_ptr();
+ if (easy && _S_exceptions_enabled) {
+ // The caller has no _Rollback_state in progress, but we may need to
+ // handle an exception thrown during the "erase, insert, reinsert" here.
+ assert(_M_get_rollback_state() == nullptr);
+ _Rollback_state rs;
+ init_rollback_state(&rs);
+ __try {
+ _M_erase_known_key_no_rebucket(get_key(*bumped_item),
+ _S_hard_if_exceptions_enabled, _S_no_delete);
+ assert(_M_slots[victim].is_ptr_null());
+ auto result = _M_cuckoo_insert_to_slot(obj, b, victim, _S_hard);
+ _M_insert_unique(*bumped_item, _S_hard);
+ _M_clear_rollback_state();
+ return result;
+ } __catch (...) {
+ _M_rollback();
+ __throw_exception_again;
+ }
+ } else {
+ _M_erase_known_key_no_rebucket(get_key(*bumped_item), easy, _S_no_delete);
+ assert(_M_slots[victim].is_ptr_null());
+ auto result = _M_cuckoo_insert_to_slot(obj, b, victim, easy);
+ _M_insert_unique(*bumped_item, _S_hard_if_exceptions_enabled);
+ return result;
+ }
+ }
+
+ std::pair<_Bucket_number, _Bucket_number>
+ get_bucket_numbers_for_extensions(size_type g1, size_type g2) const {
+ // Perhaps unnecessarily, we use multiple-choice chaining, so
+ // we return two distinct bucket numbers here. (See also the
+ // comment about 4 lg b, above.) The code assumes that the number of
+ // buckets is even, but it is easy to remove that assumption if it ever
+ // becomes necessary.
+ assert(_M_rng_state != nullptr);
+ _Bucket_number result0 = g1 * _M_random_odd_number() + g2;
+ result0 ^= result0 >> (sizeof(result0) == 8 ? 47 : 17);
+ result0 = _M_mod_bucket_count(result0 * _M_random_odd_number());
+ _Bucket_number result1 = g2 * _M_random_odd_number() + g1;
+ result1 ^= result1 >> (sizeof(result1) == 8 ? 46 : 16);
+ result1 = _M_mod_bucket_count((2 * result1 + 1) ^ result0);
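+ // (2 * result1 + 1) is odd, so the XOR flips the low bit of result0, and
+ // masking by the (even) bucket count minus 1 preserves that bit; hence
+ // the two results always differ.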
+ assert(result0 != result1);
+ return {result0, result1};
+ }
+
+ struct ExtensionsIfAnyResult
+ {
+ // Whether any extensions for the pair exist.
+ bool found_any;
+ // The first such extension (if any).
+ _Extension_list* el;
+ // Approximately how many _Extension_list nodes were examined.
+ size_type nodes_examined;
+ };
+
+ ExtensionsIfAnyResult extensions_if_any(size_type g1,
+ size_type g2) const {
+ if (_M_extension_list == nullptr) {
+ return {false, nullptr, 0};
+ }
+
+ const auto indices_to_examine = get_bucket_numbers_for_extensions(g1, g2);
+ _Extension_list *el = nullptr;
+ size_type iter_count = 0;
+ for (_Bucket_number index_to_examine :
+ {indices_to_examine.first, indices_to_examine.second}) {
+ if (_M_slots[index_to_examine].contains_extension_list()) {
+ el = _M_slots[index_to_examine].get_extension_listptr();
+ for (; el != nullptr; ++iter_count, el = el->_M_next_in_chain) {
+ if (el->_M_extension.Matches(g1, g2)) {
+ return {true, el, iter_count};
+ }
+ }
+ }
+ }
+ return {false, el, iter_count};
+ }
+
+ std::pair<iterator, bool>
+ _M_insert_by_creating_first_extension(const_reference obj, size_type g0,
+ size_type g1, size_type g2,
+ _Bucket_number b) {
+ const _Bucket_number bucket_for_extensions =
+ least_crowded_bucket(get_bucket_numbers_for_extensions(g1, g2));
+ _Rollback_state rs;
+ if (_S_exceptions_enabled && _M_get_rollback_state() == nullptr) {
+ init_rollback_state(&rs);
+ }
+ __try {
+ _Slot* s = &_M_slots[bucket_for_extensions];
+ const_pointer bumped_item = nullptr;
+ _Extension_list *el = nullptr;
+ if (s->can_point_to_element()) {
+ if (!s->is_ptr_null()) { // Need to bump an element
+ bumped_item = s->get_ptr();
+ _M_erase_known_key_no_rebucket(get_key(*bumped_item),
+ _S_hard_if_exceptions_enabled, _S_no_delete);
+ assert(s->is_ptr_null());
+ }
+ ++_M_num_non_element_slots;
+ } else {
+ el = s->get_extension_listptr();
+ }
+ // No element is in the bucket now. Insert at the front of the SLL of
+ // extensions.
+ std::pair<bool, _Bucket_number> p = _M_randomly_hunt_for_bucket();
+ el = new _Extension_list{_M_extension_list, el,
+ _Extension{g1, g2, p.second}};
+ _M_extension_list = el;
+ s->set_ptr_and_mark_as_extension_list(el, _S_hard_if_exceptions_enabled,
+ _M_get_rollback_state());
+ std::pair<iterator, bool> result;
+ if (p.first && p.second != bucket_for_extensions) {
+ assert(_M_slots[p.second].is_available());
+ result = _M_cuckoo_insert_to_slot(obj, b, p.second,
+ _S_hard_if_exceptions_enabled);
+ } else {
+ result = _M_cuckoo_insert_unique(obj, g0, b, _S_hard_if_exceptions_enabled);
+ }
+ // Almost done. Reinsert bumped item, if there is one.
+ if (bumped_item != nullptr) {
+ _M_insert_unique(*bumped_item, _S_hard_if_exceptions_enabled);
+ }
+ if (_S_exceptions_enabled && _M_get_rollback_state() == &rs)
+ _M_clear_rollback_state();
+ return result;
+ } __catch (...) {
+ _M_rollback();
+ __throw_exception_again;
+ }
+ }
+
+ // There are only two important cases: one of the buckets is
+ // available and the other isn't, or both buckets contain lists and
+ // the lists are different lengths. If we get both of those right
+ // we can hope to achieve the well-known O(ln ln n) bound for chain
+ // length when chaining with the choice of two buckets. In the
+ // unimportant cases, this method may return either p.first or p.second.
+ _Bucket_number
+ least_crowded_bucket(std::pair<_Bucket_number, _Bucket_number> __p) const
+ {
+ const _Slot& __s0 = _M_slots[__p.first];
+ const _Slot& __s1 = _M_slots[__p.second];
+ if (__s0.is_available()) return __p.first;
+ if (__s1.is_available()) return __p.second;
+ if (__s0.contains_extension_list() && __s1.contains_extension_list())
+ {
+ _Extension_list* __el0 = __s0.get_extension_listptr();
+ _Extension_list* __el1 = __s1.get_extension_listptr();
+ for (;;)
+ {
+ if ((__el0 = __el0->_M_next_in_chain) == nullptr) return __p.first;
+ if ((__el1 = __el1->_M_next_in_chain) == nullptr) return __p.second;
+ }
+ }
+ return __p.first;
+ }
+
+ std::pair<iterator, bool>
+ _M_insert_by_creating_another_extension(const_reference __obj,
+ _Extension_list* __last_extension_visited,
+ size_type __g0,
+ size_type __g1,
+ size_type __g2,
+ _Bucket_number __b) {
+ _Rollback_state __rs;
+ if (_S_exceptions_enabled && _M_get_rollback_state() == nullptr) {
+ init_rollback_state(&__rs);
+ }
+ __try
+ {
+ std::pair<bool, _Bucket_number> __p = _M_randomly_hunt_for_bucket();
+ std::pair<bool, _Bucket_number> __pp;
+ // Try to find two empty buckets, by probing at random.
+ // Whether or not two empty buckets are found, this loop will
+ // terminate in a reasonable amount of time, usually one iteration.
+ do {
+ __pp = _M_randomly_hunt_for_bucket();
+ } while (__p.second == __pp.second);
+ _Extension_list* __el = new _Extension_list{
+ _M_extension_list, __last_extension_visited->_M_next_in_chain,
+ _Extension{__g1, __g2, __p.second, __pp.second}};
+ // TODO: test if new returned nullptr(?)
+ _M_extension_list = __el;
+ _M_rollbackable_write(__last_extension_visited->_M_next_in_chain,
+ __el);
+ std::pair<iterator, bool> __result;
+ if (__p.first)
+ {
+ assert(_M_slots[__p.second].is_available());
+ __result =
+ _M_cuckoo_insert_to_slot(__obj, __b, __p.second,
+ _S_hard_if_exceptions_enabled);
+ }
+ else
+ {
+ __result = _M_cuckoo_insert_unique(__obj, __g0, __b,
+ _S_hard_if_exceptions_enabled);
+ }
+ if (_S_exceptions_enabled && (_M_get_rollback_state() == &__rs))
+ {
+ _M_clear_rollback_state();
+ }
+ return __result;
+ }
+ __catch (...)
+ {
+ _M_rollback();
+ __throw_exception_again;
+ }
+ }
+
+ std::pair<bool, _Bucket_number>
+ _M_randomly_hunt_for_bucket()
+ {
+ _Bucket_number __result = 0;
+ for (int __i = 0; __i < _S_random_hunt_max_iters; __i++)
+ {
+ __result = _M_rand_at_most(bucket_count() - 1);
+ if (_M_slots[__result].is_available())
+ {
+ return {true, __result};
+ }
+ }
+ return {false, __result};
+ }
+
+ // Put obj in one of its possible nests, _M_slots[b].
+ // Requires _M_preferred_bucket(obj) == pref.
+ // Requires _M_slots[b] to be a possible nest for obj that is currently empty.
+ std::pair<iterator, bool> _M_cuckoo_insert_to_slot(const_reference __obj,
+ _Bucket_number __pref,
+ _Bucket_number __b,
+ bool __easy) {
+ _M_slots[__b].set_ptr_that_was_null(&__obj, __easy, _M_get_rollback_state());
+ _M_increment_num_elements();
+ return {_M_iter_at(__b), _S_did_insert};
+ }
+
+ void _M_rollback() {
+#if __cpp_exceptions
+ if (_M_rollback_state == nullptr) return;
+ // Undo everything in the log, starting with the most recent.
+ for (size_type i = _M_rollback_state->size(); i-- > 0; ) {
+ typename _Rollback_state::Record& r = _M_rollback_state->at(i);
+ memcpy(r.ptr, &r.old_value, sizeof(r.old_value));
+ }
+ // Undo allocation/construction of vals, if any.
+ for (const auto& v : _M_rollback_state->_M_vals_created) {
+ _M_delete_value(*v);
+ }
+
+ _M_num_elements = _M_rollback_state->_M_num_elements;
+ _M_num_non_element_slots = _M_rollback_state->_M_num_non_element_slots;
+ _M_rollback_state = nullptr;
+#endif
+ }
+
+ _Bucket_number
+ _M_preferred_bucket_for_key(const key_type& __key) const
+ { return _M_mod_bucket_count(_M_hash0(__key)); }
+
+ _Bucket_number
+ _M_preferred_bucket(const_reference __obj) const
+ { return _M_preferred_bucket_for_key(get_key(__obj)); }
+
+ iterator
+ _M_iter_at(_Bucket_number __b)
+ { return iterator(this, __b, _S_no_advance); }
+
+ // Specializations of insert(it, it) depending on the power of the iterator:
+ // (1) Iterator has multi-pass guarantee.
+ template<typename _ForwardIterator>
+ void insert(_ForwardIterator __f, _ForwardIterator __l,
+ std::forward_iterator_tag)
+ {
+ size_t __dist = std::distance(__f, __l);
+ if (__dist >= std::numeric_limits<size_type>::max()) {
+#if __cpp_exceptions
+ throw std::length_error("insert-range overflow");
+#else
+ assert(0 && "insert-range overflow");
+#endif
+ }
+ _M_resize_delta(static_cast<size_type>(__dist));
+ for ( ; __dist > 0; --__dist, ++__f) {
+ insert(*__f);
+ }
+ }
+
+ // (2) Arbitrary iterator, can't tell how much to resize
+ template<typename _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l,
+ std::input_iterator_tag) {
+ for ( ; __f != __l; ++__f) {
+ insert(*__f);
+ }
+ }
+
+ public:
+ // This is the normal insert routine, used by the outside world.
+ template<typename _V>
+ std::pair<iterator, bool>
+ insert(_V&& __v, bool __copy = _S_copy)
+ {
+ assert(_M_get_rollback_state() == nullptr);
+ for (;;) {
+ __try
+ {
+ return _M_find_or_insert(std::forward<_V>(__v), __copy);
+ }
+ __catch (_Resize_request&)
+ {
+ _M_resize_delta(1);
+ }
+ }
+ }
+
+ void
+ insert(std::initializer_list<value_type> __il)
+ { this->insert(__il.begin(), __il.end()); }
+
+ // When inserting a lot at a time, we specialize on the type of iterator
+ template<typename _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l)
+ {
+ // specializes on iterator type
+ this->insert(__f, __l,
+ typename std::iterator_traits<_InputIterator>::iterator_category());
+ }
+
+ template<typename _V>
+ iterator
+ insert(const_iterator __hint, _V&& __v)
+ { return insert(std::forward<_V>(__v)).first; }
+
+ template<typename... _Args>
+ std::pair<iterator, bool>
+ emplace(_Args&&... __args)
+ {
+ // Create node and compute hash. If something goes wrong or no insertion
+ // was needed then we'll delete it.
+ const_pointer __p = new value_type(std::forward<_Args>(__args)...);
+ std::pair<iterator, bool> __result;
+ __try
+ {
+ __result = this->insert(*__p, _S_no_copy);
+ }
+ __catch (...)
+ {
+ _M_delete_value(*__p);
+ __throw_exception_again;
+ }
+ if (!__result.second)
+ {
+ _M_delete_value(*__p);
+ }
+ return __result;
+ }
+
+ template<typename... _Args>
+ iterator emplace_hint(const_iterator, _Args&&... __args)
+ {
+ // Create node and compute hash. If something goes wrong or no insertion
+ // was needed then we'll delete it.
+ const_pointer __p = new value_type(std::forward<_Args>(__args)...);
+ std::pair<iterator, bool> __result;
+ __try
+ {
+ __result = this->insert(*__p, _S_no_copy);
+ }
+ __catch (...)
+ {
+ _M_delete_value(*__p);
+ __throw_exception_again;
+ }
+ if (!__result.second)
+ {
+ _M_delete_value(*__p);
+ }
+ return __result.first;
+ }
+
+ // DELETION ROUTINES
+ size_type
+ erase(const key_type& __key)
+ { return _M_erase(__key, _S_easy, _S_delete); }
+
+ iterator
+ erase(iterator pos)
+ { return _M_erase_iter(pos); }
+
+ iterator
+ erase(const_iterator pos)
+ { return _M_erase_iter(pos); }
+
+ iterator
+ erase(const_iterator __f, const_iterator __l)
+ {
+ while (__f != __l) {
+ __f = this->erase(__f);
+ }
+ return __f;
+ }
+
+ private:
+ iterator
+ _M_erase_iter(iterator __pos)
+ {
+ _Disallow_rebucket_holder __dbh(&_M_allow_rebucketing);
+ assert(__pos != end());
+ iterator __result = std::next(__pos);
+ erase(*__pos);
+ return __result;
+ }
+
+ size_type
+ _M_erase(const key_type& __k, bool __easy, bool __del)
+ {
+ assert(__easy || !__del);
+ if (bump_work_count_and_maybe_resize(__easy))
+ {
+ return _M_erase(__k, _S_easy, __del);
+ }
+ const size_type __g0 = _M_hash0(__k);
+ const _Bucket_number __b = _M_mod_bucket_count(__g0);
+ if (_M_slots[__b].has_bitset())
+ {
+ // Common case. We have a bitset telling us where to look.
+ const int __bitset = _M_slots[__b].get_bitset();
+ for (int __lg_t = 0, __t = 1; __t <= __bitset; __lg_t++, __t *= 2)
+ {
+ if ((__bitset & __t) != 0)
+ {
+ const _Bucket_number __q = __b ^ __lg_t;
+ if (!_M_slots[__q].is_ptr_null()
+ && _M_equals(get_key(*_M_slots[__q].get_ptr()), __k))
+ {
+ // Found a match for __k.
+ if (__del) _M_delete_value(*_M_slots[__q].get_ptr());
+ _M_slots[__q].set_ptr_to_null(__easy, _M_get_rollback_state());
+ _M_slots[__b].xor_bitset(__t, __easy, _M_get_rollback_state());
+ _M_decrement_num_elements();
+ return 1;
+ }
+ }
+ }
+ return 0;
+ }
+ else
+ {
+ // Uncommon case. Search an overfull bucket.
+ const size_type __g1 = _M_hash1(__k, __g0);
+ const _Bucket_number __h1 = _M_mod_bucket_count(__g1);
+ for (_Bucket_number __i = 0; __i < _S_z; __i++)
+ {
+ const _Bucket_number __q = __h1 ^ __i;
+ if (_M_slots[__q].has_element()
+ && _M_equals(get_key(*_M_slots[__q].get_ptr()), __k))
+ {
+ return _M_cuckoo_remove_from_slot(__q, __easy, __del);
+ }
+ }
+ const size_type __g2 = _M_hash2(__k, __g0);
+ const _Bucket_number __h2 = _M_mod_bucket_count(__g2);
+ for (_Bucket_number __i = 0; __i < _S_z; __i++)
+ {
+ const _Bucket_number __q = __h2 ^ __i;
+ if (_M_slots[__q].has_element()
+ && _M_equals(get_key(*_M_slots[__q].get_ptr()), __k))
+ {
+ return _M_cuckoo_remove_from_slot(__q, __easy, __del);
+ }
+ }
+ // Check for Extensions.
+ auto __p = extensions_if_any(__g1, __g2);
+ if (__p.found_any)
+ {
+ // At least one relevant extension exists.
+ _Extension_list* __el = __p.el;
+ const _Extension* __e = &__el->_M_extension;
+ // The first one is a special case: only e->i0 is used.
+ assert(__e->Matches(__g1, __g2));
+ const _Bucket_number __q = __e->i0;
+ if (_M_slots[__q].has_element()
+ && _M_equals(get_key(*_M_slots[__q].get_ptr()), __k))
+ {
+ return _M_cuckoo_remove_from_slot(__q, __easy, __del);
+ }
+ // Search subsequent extensions, if any.
+ size_type __num_to_search_per_extension = 2;
+ while ((__el = __el->_M_next_in_chain) != nullptr)
+ {
+ __e = &__el->_M_extension;
+ if (!(__e->Matches(__g1, __g2))) break;
+ _Permutation_iterator __pi(__e->i0, __e->i1, _M_num_buckets,
+ __num_to_search_per_extension);
+ do {
+ const _Bucket_number __q = __pi._M_next();
+ if (_M_slots[__q].has_element()
+ && _M_equals(get_key(*_M_slots[__q].get_ptr()), __k))
+ {
+ return _M_cuckoo_remove_from_slot(__q, __easy, __del);
+ }
+ } while (!__pi._M_done());
+ __num_to_search_per_extension *= 2;
+ }
+ }
+ return 0;
+ }
+ }
+
+ // Equivalent to _M_erase(__k, __easy, __del), but requires __k to be present.
+ size_type
+ _M_erase_known_key(const key_type& __k, bool __easy, bool __del)
+ {
+ int __count = _M_erase(__k, __easy, __del);
+ assert(__count == 1);
+ return __count;
+ }
+
+ // The same, but do not allow rebucketing.
+ size_type
+ _M_erase_known_key_no_rebucket(const key_type& __k, bool __easy,
+ bool __del)
+ {
+ _Disallow_rebucket_holder __dbh(&_M_allow_rebucketing);
+ return _M_erase_known_key(__k, __easy, __del);
+ }
+
+ size_type
+ _M_cuckoo_remove_from_slot(_Bucket_number __b, bool __easy, bool __del)
+ {
+ if (__del) _M_delete_value(*_M_slots[__b].get_ptr());
+ _M_slots[__b].set_ptr_to_null(__easy, _M_get_rollback_state());
+ _M_decrement_num_elements();
+ return 1;
+ }
+
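+ // The bucket count is a power of 2, so masking with bucket_count() - 1
+ // computes __b % bucket_count().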
+ _Bucket_number
+ _M_mod_bucket_count(_Bucket_number __b) const
+ { return __b & (bucket_count() - 1); }
+
+ pointer*
+ _M_extend_rollback_state_for_new_values()
+ {
+ if (!_S_exceptions_enabled || _M_get_rollback_state() == nullptr)
+ {
+ return nullptr;
+ }
+ _M_get_rollback_state()->_M_vals_created
+ .resize(_M_get_rollback_state()->_M_vals_created.size() + 1);
+ return &_M_get_rollback_state()->_M_vals_created.back();
+ }
+
+ public:
+ // COMPARISON
+ bool
+ _M_equal(const _Cuckoo& ht) const
+ {
+ if (size() != ht.size())
+ {
+ return false;
+ }
+ else if (this == &ht)
+ {
+ return true;
+ }
+ else
+ {
+ for (const_reference r : *this)
+ {
+ if (ht.count(get_key(r)) != 1)
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+
+ private:
+ // Package functors with another class to eliminate memory needed for
+ // zero-size functors. Since _Extract_key and hasher's operator() might
+ // have the same function signature, they must be packaged in
+ // different classes.
+ struct _Settings
+ : cuckoo_internal::_Cuckoo_settings
+ {
+ explicit _Settings(const hasher& __h)
+ : cuckoo_internal::_Cuckoo_settings(
+ __h, _S_default_max_occupancy_percent / 100.0f,
+ _S_default_consider_shrink_occupancy / 100.0f)
+ { }
+ };
+
+ // Packages _Extract_key and _Equal_key functors.
+ class _KeyInfo : public _Extract_key, public _Equal_key {
+ public:
+ _KeyInfo(const _Extract_key& __ek, const _Equal_key& __eq)
+ : _Extract_key(__ek), _Equal_key(__eq) { }
+
+ // The return type of _Extract_key::operator(), e.g., _Key or const _Key&.
+ typedef decltype(_Extract_key()(*(const_pointer)0)) _Get_key_type;
+
+ _Get_key_type
+ get_key(const_reference __v) const
+ { return _Extract_key::operator()(__v); }
+
+ bool
+ _M_equals(const key_type& __a, const key_type& __b) const
+ { return _Equal_key::operator()(__a, __b); }
+ };
+
+ // Utility functions to access the templated operators.
+ size_type
+ _M_hash0(const key_type& __v) const
+ { return _M_settings._M_hash0(__v); }
+
+ size_type
+ _M_hash1(const key_type& __v, size_type __g0) const
+ { return _M_settings._M_hash1(__v, __g0); }
+
+ size_type
+ _M_hash2(const key_type& __v, size_type __g0) const
+ { return _M_settings._M_hash2(__v, __g0); }
+
+ bool
+ _M_equals(const key_type& __a, const key_type& __b) const
+ { return _M_key_info._M_equals(__a, __b); }
+
+ typename _KeyInfo::_Get_key_type
+ get_key(const_reference __v) const
+ { return _M_key_info.get_key(__v); }
+
+ void
+ set_key(pointer __v, const key_type& __k) const
+ { _M_key_info.set_key(__v, __k); }
+
+ ////////////////////////////////////////////////////////////////////////
+
+ /* array of _M_num_buckets slots */
+ _Slot* _M_slots;
+ _Settings _M_settings;
+ _KeyInfo _M_key_info;
+
+ size_type _M_num_elements;
+ // Count of buckets consumed by bookkeeping data rather than const_pointers
+ // (ignores those with pointer == nullptr).
+ size_type _M_num_non_element_slots;
+ size_type _M_num_buckets;
+ size_type _M_work_since_last_rebuild;
+ size_type _M_work_threshold;
+#if __cpp_exceptions
+ _Rollback_state* _M_rollback_state;
+#endif
+ // Linked list of all extensions. As new ones are created, they are
+ // pushed on the front of the list. This is just bookkeeping: the
+ // list is traversed only during rollbacks or our destructor.
+ _Extension_list* _M_extension_list;
+ value_alloc_type _M_value_alloc;
+ // If _M_allow_shrinking is false then disallow shrinking. This is used in
+ // situations where we know we are increasing the load but there may be
+ // a temporary dip in the load along the way, and it's best not to allow
+ // that to trigger a shrink.
+ bool _M_allow_shrinking;
+ // An even more extreme option: we can disallow rebucketing.
+ bool _M_allow_rebucketing;
+ // Random-number-generator state, created lazily by initialize_rng().
+ _RNGState* _M_rng_state;
+ };
+
+#endif /* _CUCKOO_H */
Property changes on: libstdc++-v3/include/bits/cuckoo.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+LF
\ No newline at end of property
Index: libstdc++-v3/include/bits/unordered_set.h
===================================================================
--- libstdc++-v3/include/bits/unordered_set.h (revision 227597)
+++ libstdc++-v3/include/bits/unordered_set.h (working copy)
@@ -30,6 +30,15 @@
#ifndef _UNORDERED_SET_H
#define _UNORDERED_SET_H
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET && \
+ (defined(_GLIBCXX_DEBUG_UNORDERED_SET) || !defined(_LP64))
+#undef USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+#endif
+
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+#include <bits/cuckoo.h>
+#endif
+
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_CONTAINER
@@ -92,7 +101,15 @@
class _Alloc = std::allocator<_Value> >
class unordered_set
{
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+ struct _Identity {
+ template<typename T>
+ T&& operator()(T&& t) const { return std::forward<T>(t); }
+ };
+ typedef _Cuckoo<_Value, _Value, _Hash, _Identity, _Pred, _Alloc> _Hashtable;
+#else
typedef __uset_hashtable<_Value, _Hash, _Pred, _Alloc> _Hashtable;
+#endif
_Hashtable _M_h;
public:
@@ -137,7 +154,11 @@
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+ : _M_h(__n, __hf, __eql, _Identity(), __a)
+#else
: _M_h(__n, __hf, __eql, __a)
+#endif
{ }
/**
@@ -159,8 +180,16 @@
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+ : _M_h(__n, __hf, __eql, _Identity(), __a)
+ {
+ this->insert(__first, __last);
+ this->max_load_factor(1.0);
+ }
+#else
: _M_h(__first, __last, __n, __hf, __eql, __a)
{ }
+#endif
/// Copy constructor.
unordered_set(const unordered_set&) = default;
@@ -213,8 +242,13 @@
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+ : unordered_set(__n, __hf, __eql, __a)
+ { this->insert(__l); }
+#else
: _M_h(__l, __n, __hf, __eql, __a)
{ }
+#endif
unordered_set(size_type __n, const allocator_type& __a)
: unordered_set(__n, hasher(), key_equal(), __a)
@@ -739,6 +773,30 @@
friend bool
operator==(const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&,
const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&);
+
+#if USE_HYBRID_WILD_CUCKOO_FOR_UNORDERED_SET
+ // Extras
+
+ // The seed can be set explicitly, e.g., to aid debugging. Requires: empty().
+ void set_random_seed(const std::seed_seq& __s) { _M_h.set_random_seed(__s); }
+
+ // Clear the set without rebucketing.
+ void clear_no_resize() { _M_h.clear_no_resize(); }
+
+ // New tables allow rebucketing, and that's the recommended state. But
+ // rebucketing can be disabled or enabled manually. The return value
+ // is whether rebucketing was allowed immediately prior to the call.
+ bool allow_rebucketing(bool __new_state) {
+ return _M_h.allow_rebucketing(__new_state);
+ }
+
+ // Similarly, new tables allow shrinking. Shrinking can be disabled
+ // or enabled manually. The return value is whether shrinking was
+ // allowed immediately prior to the call.
+ bool allow_shrinking(bool __new_state) {
+ return _M_h.allow_shrinking(__new_state);
+ }
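+
+ // Usage sketch (illustrative):
+ // bool __old = __s.allow_rebucketing(false);
+ // ... bulk updates that must not rebucket ...
+ // __s.allow_rebucketing(__old);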
+#endif
};
/**