This is the mail archive of the libstdc++@gcc.gnu.org mailing list for the libstdc++ project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

hashtable local iterator


Hi

Since the modification of the hashtable implementation I have been quite unhappy with the impact on local_iterator, and especially on the end(size_type bkt) method, which required looping through all bucket nodes to access the past-the-end one. This is why I would like to propose, if there is still time, this new local iterator implementation that allows implementing begin(size_t)/end(size_t) in constant time. Note that:
- this new iterator type needs a pointer to its parent container to work, which is quite unusual for normal iterators.
- I had to make an overload of _M_bucket_index public to use it in the local iterator implementation; would you prefer to introduce a friend declaration?


Thanks to this new implementation I have been able to enhance the unordered container profile mode because it now relies on local iterators. This way we can use the cached hash code most of the time, whereas with the current implementation the hash code must be recomputed for each element in the container.

Tested under x86_64 linux.

2011-12-13 François Dumont <fdumont@gcc.gnu.org>

* include/bits/hashtable_policy.h (_Local_iterator_base,
_Local_iterator, _Local_const_iterator): New, implementation of...
* include/bits/hashtable.h (_Hashtable<>::local_iterator,
_Hashtable<>::const_local_iterator): ...those. (_M_bucket_index): New
overloads using the current bucket count, used throughout the _Hashtable<>
implementation.
* include/debug/unordered_map
(unordered_map<>::_S_to_local, unordered_multimap<>::_S_to_local):
Adapt to match new local iterator implementation.
* include/debug/unordered_set (unordered_set<>::_S_to_local,
unordered_multiset<>::_S_to_local): Likewise.
* include/profile/unordered_map (unordered_map<>::_M_profile_destruct,
unordered_multimap<>::_M_profile_destruct): Enhance thanks to usage of
local iterators.
* include/profile/unordered_set (unordered_set<>::_M_profile_destruct,
unordered_multiset<>::_M_profile_destruct): Likewise.


If it is ok to commit, I will only be able to do so on Thursday.

François
Index: include/debug/unordered_map
===================================================================
--- include/debug/unordered_map	(revision 182187)
+++ include/debug/unordered_map	(working copy)
@@ -418,11 +418,19 @@
 
       static _Base_local_iterator
       _S_to_local(_Base_iterator __it)
-      { return _Base_local_iterator(__it._M_cur); }
+      {
+	// The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_local_iterator(nullptr, __it._M_cur, 0);
+      }
 
       static _Base_const_local_iterator
       _S_to_local(_Base_const_iterator __it)
-      { return _Base_const_local_iterator(__it._M_cur); }
+      {
+	// The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_const_local_iterator(nullptr, __it._M_cur, 0);
+      }
     };
 
   template<typename _Key, typename _Tp, typename _Hash,
@@ -820,11 +828,19 @@
 
       static _Base_local_iterator
       _S_to_local(_Base_iterator __it)
-      { return _Base_local_iterator(__it._M_cur); }
+      {
+	// The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_local_iterator(nullptr, __it._M_cur, 0);
+      }
 
       static _Base_const_local_iterator
       _S_to_local(_Base_const_iterator __it)
-      { return _Base_const_local_iterator(__it._M_cur); }
+      {
+	// The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_const_local_iterator(nullptr, __it._M_cur, 0);
+      }
     };
 
   template<typename _Key, typename _Tp, typename _Hash,
Index: include/debug/unordered_set
===================================================================
--- include/debug/unordered_set	(revision 182187)
+++ include/debug/unordered_set	(working copy)
@@ -417,11 +417,19 @@
 
       static _Base_local_iterator
       _S_to_local(_Base_iterator __it)
-      { return _Base_local_iterator(__it._M_cur); }
+      {
+        // The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_local_iterator(nullptr, __it._M_cur, 0);
+      }
 
       static _Base_const_local_iterator
       _S_to_local(_Base_const_iterator __it)
-      { return _Base_const_local_iterator(__it._M_cur); }
+      {
+        // The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_const_local_iterator(nullptr, __it._M_cur, 0);
+      }
     };
 
   template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>
@@ -805,11 +813,19 @@
 
       static _Base_local_iterator
       _S_to_local(_Base_iterator __it)
-      { return _Base_local_iterator(__it._M_cur); }
+      {
+        // The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_local_iterator(nullptr, __it._M_cur, 0);
+      }
 
       static _Base_const_local_iterator
       _S_to_local(_Base_const_iterator __it)
-      { return _Base_const_local_iterator(__it._M_cur); }
+      {
+        // The return local iterator is not going to be incremented so we don't
+	// need to give it a real hashtable parent or compute __it node bucket
+	return _Base_const_local_iterator(nullptr, __it._M_cur, 0);
+      }
     };
 
   template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>
Index: include/profile/unordered_map
===================================================================
--- include/profile/unordered_map	(revision 182188)
+++ include/profile/unordered_map	(working copy)
@@ -308,9 +308,9 @@
 	while (__it != this->end())
 	  {
 	    size_type __bkt = this->bucket(__it->first);
-	    for (++__it; __it != this->end()
-			 && this->bucket(__it->first) == __bkt;
-		 ++__it)
+	    auto __lit = this->begin(__bkt);
+	    auto __lend = this->end(__bkt);
+	    for (++__it, ++__lit; __lit != __lend; ++__it, ++__lit)
 	      ++__chain;
 	    if (__chain)
 	      {
@@ -577,9 +577,9 @@
 	while (__it != this->end())
 	  {
 	    size_type __bkt = this->bucket(__it->first);
-	    for (++__it; __it != this->end()
-			 && this->bucket(__it->first) == __bkt;
-		 ++__it)
+	    auto __lit = this->begin(__bkt);
+	    auto __lend = this->end(__bkt);
+	    for (++__it, ++__lit; __lit != __lend; ++__it, ++__lit)
 	      ++__chain;
 	    if (__chain)
 	      {
Index: include/profile/unordered_set
===================================================================
--- include/profile/unordered_set	(revision 182188)
+++ include/profile/unordered_set	(working copy)
@@ -277,10 +277,10 @@
 	while (__it != this->end())
 	  {
 	    size_type __bkt = this->bucket(*__it);
-	    for (++__it; __it != this->end() && this->bucket(*__it) == __bkt;
-		 ++__it)
+	    auto __lit = this->begin(__bkt);
+	    auto __lend = this->end(__bkt);
+	    for (++__it, ++__lit; __lit != __lend; ++__it, ++__lit)
 	      ++__chain;
-
 	    if (__chain)
 	      {
 		++__chain;
@@ -539,10 +539,10 @@
 	while (__it != this->end())
 	  {
 	    size_type __bkt = this->bucket(*__it);
-	    for (++__it; __it != this->end() && this->bucket(*__it) == __bkt;
-		 ++__it)
+	    auto __lit = this->begin(__bkt);
+	    auto __lend = this->end(__bkt);
+	    for (++__it, ++__lit; __lit != __lend; ++__it, ++__lit)
 	      ++__chain;
-
 	    if (__chain)
 	      {
 		++__chain;
Index: include/bits/hashtable.h
===================================================================
--- include/bits/hashtable.h	(revision 182187)
+++ include/bits/hashtable.h	(working copy)
@@ -177,6 +177,10 @@
       static_assert(__or_<integral_constant<bool, __cache_hash_code>,
 			  __detail::__is_noexcept_hash<_Key, _H1>>::value,
       	    "Cache the hash code or qualify your hash functor with noexcept");
+      typedef __detail::_Hash_code_base<_Key, _Value, _ExtractKey,
+					_Equal, _H1, _H2, _Hash,
+				       	__cache_hash_code> _HCBase;
+
     public:
       typedef _Allocator                                  allocator_type;
       typedef _Value                                      value_type;
@@ -191,17 +195,22 @@
 
       typedef std::size_t                                 size_type;
       typedef std::ptrdiff_t                              difference_type;
+      typedef __detail::_Local_iterator<_Hashtable, value_type,
+					__constant_iterators,
+					__cache_hash_code>
+							  local_iterator;
+      typedef __detail::_Local_const_iterator<_Hashtable, value_type,
+					      __constant_iterators,
+					      __cache_hash_code>
+							  const_local_iterator;
       typedef __detail::_Node_iterator<value_type, __constant_iterators,
 				       __cache_hash_code>
-							  local_iterator;
+							  iterator;
       typedef __detail::_Node_const_iterator<value_type,
 					     __constant_iterators,
 					     __cache_hash_code>
-							  const_local_iterator;
+							  const_iterator;
 
-      typedef local_iterator iterator;
-      typedef const_local_iterator const_iterator;
-
       template<typename _Key2, typename _Value2, typename _Ex2, bool __unique2,
 	       typename _Hashtable2>
 	friend struct __detail::_Map_base;
@@ -253,14 +262,6 @@
       _Node*
       _M_bucket_end(size_type __bkt) const;
 
-      // Gets the bucket node after the last if any
-      _Node*
-      _M_bucket_past_the_end(size_type __bkt) const
-        {
-	  _Node* __end = _M_bucket_end(__bkt);
-	  return __end ? __end->_M_next : nullptr;
-	}
-
     public:
       // Constructor, destructor, assignment, swap
       _Hashtable(size_type __bucket_hint,
@@ -364,35 +365,32 @@
 
       size_type
       bucket(const key_type& __k) const
-      {
-	return this->_M_bucket_index(__k, this->_M_hash_code(__k),
-				     bucket_count());
-      }
+      { return _M_bucket_index(__k, this->_M_hash_code(__k)); }
 
       local_iterator
       begin(size_type __n)
-      { return local_iterator(_M_bucket_begin(__n)); }
+      { return local_iterator(this, _M_bucket_begin(__n), __n); }
 
       local_iterator
       end(size_type __n)
-      { return local_iterator(_M_bucket_past_the_end(__n)); }
+      { return local_iterator(this, nullptr, __n); }
 
       const_local_iterator
       begin(size_type __n) const
-      { return const_local_iterator(_M_bucket_begin(__n)); }
+      { return const_local_iterator(this, _M_bucket_begin(__n), __n); }
 
       const_local_iterator
       end(size_type __n) const
-      { return const_local_iterator(_M_bucket_past_the_end(__n)); }
+      { return const_local_iterator(this, nullptr, __n); }
 
       // DR 691.
       const_local_iterator
       cbegin(size_type __n) const
-      { return const_local_iterator(_M_bucket_begin(__n)); }
+      { return const_local_iterator(this, _M_bucket_begin(__n), __n); }
 
       const_local_iterator
       cend(size_type __n) const
-      { return const_local_iterator(_M_bucket_past_the_end(__n)); }
+      { return const_local_iterator(this, nullptr, __n); }
 
       float
       load_factor() const noexcept
@@ -427,7 +425,16 @@
       std::pair<const_iterator, const_iterator>
       equal_range(const key_type& __k) const;
 
+      size_type
+      _M_bucket_index(_Node* __n) const
+      { return _HCBase::_M_bucket_index(__n, _M_bucket_count); }
+
     private:
+      size_type
+      _M_bucket_index(const key_type& __k,
+		      typename _Hashtable::_Hash_code_type __c) const
+      { return _HCBase::_M_bucket_index(__k, __c, _M_bucket_count); }
+
       // Find and insert helper functions and types
       _Node*
       _M_find_node(size_type, const key_type&,
@@ -679,9 +686,7 @@
       _Node* __n = _M_bucket_begin(__bkt);
       if (__n)
 	for (;; __n = __n->_M_next)
-	  if (!__n->_M_next 
-	      || this->_M_bucket_index(__n->_M_next, _M_bucket_count)
-		 != __bkt)
+	  if (!__n->_M_next || _M_bucket_index(__n->_M_next) != __bkt)
 	    break;
       return __n;
     }
@@ -916,7 +921,7 @@
     find(const key_type& __k)
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
       _Node* __p = _M_find_node(__n, __k, __code);
       return __p ? iterator(__p) : this->end();
     }
@@ -933,7 +938,7 @@
     find(const key_type& __k) const
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
       _Node* __p = _M_find_node(__n, __k, __code);
       return __p ? const_iterator(__p) : this->end();
     }
@@ -950,7 +955,7 @@
     count(const key_type& __k) const
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
       _Node* __p = _M_bucket_begin(__n);
       if (!__p)
 	return 0;
@@ -965,9 +970,7 @@
 	    // equivalent value after an equivalent one it means that we won't
 	    // find anymore an equivalent value.
 	    break;
-	  if (!__p->_M_next
-	      || this->_M_bucket_index(__p->_M_next, _M_bucket_count)
-		 != __n)
+	  if (!__p->_M_next || _M_bucket_index(__p->_M_next) != __n)
 	    break;
 	}
       return __result;
@@ -990,14 +993,13 @@
     equal_range(const key_type& __k)
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
       _Node* __p = _M_find_node(__n, __k, __code);
 
       if (__p)
 	{
 	  _Node* __p1 = __p->_M_next;
-	  while (__p1
-		 && this->_M_bucket_index(__p1, _M_bucket_count) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __n
 		 && this->_M_compare(__k, __code, __p1))
 	    __p1 = __p1->_M_next;
 
@@ -1024,14 +1026,13 @@
     equal_range(const key_type& __k) const
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
       _Node* __p = _M_find_node(__n, __k, __code);
 
       if (__p)
 	{
 	  _Node* __p1 = __p->_M_next;
-	  while (__p1
-		 && this->_M_bucket_index(__p1, _M_bucket_count) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __n
 		 && this->_M_compare(__k, __code, __p1))
 	    __p1 = __p1->_M_next;
 
@@ -1062,8 +1063,7 @@
 	{
 	  if (this->_M_compare(__k, __code, __p))
 	    return __p;
-	  if (!(__p->_M_next)
-	      || this->_M_bucket_index(__p->_M_next, _M_bucket_count) != __n)
+	  if (!(__p->_M_next) || _M_bucket_index(__p->_M_next) != __n)
 	    break;
 	}
       return nullptr;
@@ -1119,8 +1119,7 @@
     {
       if (__prev_n->_M_next)
 	{
-	  size_type __next_bkt =
-	    this->_M_bucket_index(__prev_n->_M_next, _M_bucket_count);
+	  size_type __next_bkt = _M_bucket_index(__prev_n->_M_next);
 	  if (__next_bkt != __bkt)
 	    _M_buckets[__next_bkt] = __new_n;
 	}
@@ -1199,8 +1198,7 @@
 	    const key_type& __k = this->_M_extract(__new_node->_M_v);
 	    typename _Hashtable::_Hash_code_type __code
 	      = this->_M_hash_code(__k);
-	    size_type __bkt
-	      = this->_M_bucket_index(__k, __code, _M_bucket_count);
+	    size_type __bkt = _M_bucket_index(__k, __code);
 
 	    if (_Node* __p = _M_find_node(__bkt, __k, __code))
 	      {
@@ -1220,7 +1218,7 @@
 	    if (__do_rehash.first)
 	      {
 		_M_rehash(__do_rehash.second, __saved_state);
-		__bkt = this->_M_bucket_index(__k, __code, _M_bucket_count);
+		__bkt = _M_bucket_index(__k, __code);
 	      }
 
 	    if (_M_buckets[__bkt])
@@ -1262,8 +1260,7 @@
 	    typename _Hashtable::_Hash_code_type __code
 	      = this->_M_hash_code(__k);
 	    this->_M_store_code(__new_node, __code);
-	    size_type __bkt
-	      = this->_M_bucket_index(__k, __code, _M_bucket_count);
+	    size_type __bkt = _M_bucket_index(__k, __code);
 
 	    // Second find the node, avoid rehash if compare throws.
 	    _Node* __prev = _M_find_node(__bkt, __k, __code);
@@ -1271,7 +1268,7 @@
 	    if (__do_rehash.first)
 	      {
 		_M_rehash(__do_rehash.second, __saved_state);
-		__bkt = this->_M_bucket_index(__k, __code, _M_bucket_count);
+		__bkt = _M_bucket_index(__k, __code);
 		// __prev is still valid because rehash do not invalidate nodes
 	      }
 
@@ -1323,7 +1320,7 @@
 	if (__do_rehash.first)
 	  {
 	    const key_type& __k = this->_M_extract(__v);
-	    __n = this->_M_bucket_index(__k, __code, __do_rehash.second);
+	    __n = _HCBase::_M_bucket_index(__k, __code, __do_rehash.second);
 	  }
 
 	_Node* __new_node = nullptr;
@@ -1369,7 +1366,7 @@
       {
 	const key_type& __k = this->_M_extract(__v);
 	typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-	size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+	size_type __n = _M_bucket_index(__k, __code);
 
 	if (_Node* __p = _M_find_node(__n, __k, __code))
 	  return std::make_pair(iterator(__p), false);
@@ -1397,7 +1394,7 @@
 
 	const key_type& __k = this->_M_extract(__v);
 	typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-	size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+	size_type __n = _M_bucket_index(__k, __code);
 
 	// First find the node, avoid leaking new_node if compare throws.
 	_Node* __prev = _M_find_node(__n, __k, __code);
@@ -1410,7 +1407,7 @@
 	    if (__do_rehash.first)
 	      {
 		_M_rehash(__do_rehash.second, __saved_state);
-		__n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+		__n = _M_bucket_index(__k, __code);
 		// __prev is still valid because rehash do not invalidate nodes
 	      }
 
@@ -1477,7 +1474,7 @@
     erase(const_iterator __it)
     {
       _Node* __n = __it._M_cur;
-      std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count);
+      std::size_t __bkt = _M_bucket_index(__n);
 
       // Look for previous node to unlink it from the erased one, this is why
       // we need buckets to contain the before begin node of the bucket to make
@@ -1485,12 +1482,10 @@
       _Node* __prev_n = _M_get_previous_node(__bkt, __n);
       if (__n == _M_bucket_begin(__bkt))
 	_M_remove_bucket_begin(__bkt, __n->_M_next,
-	   __n->_M_next ? this->_M_bucket_index(__n->_M_next, _M_bucket_count)
-			: 0);
+	   __n->_M_next ? _M_bucket_index(__n->_M_next) : 0);
       else if (__n->_M_next)
 	{
-	  size_type __next_bkt =
-	    this->_M_bucket_index(__n->_M_next, _M_bucket_count);
+	  size_type __next_bkt = _M_bucket_index(__n->_M_next);
 	  if (__next_bkt != __bkt)
 	    _M_buckets[__next_bkt] = __prev_n;
 	}
@@ -1516,7 +1511,7 @@
     erase(const key_type& __k)
     {
       typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __bkt = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
       // Look for the first matching node with its previous node at the same
       // time
       _Node* __n = _M_buckets[__bkt];
@@ -1533,8 +1528,7 @@
 	{
 	  if (this->_M_compare(__k, __code, __n))
 	    break;
-	  if (!(__n->_M_next)
-	      || this->_M_bucket_index(__n->_M_next, _M_bucket_count) != __bkt)
+	  if (!(__n->_M_next) || _M_bucket_index(__n->_M_next) != __bkt)
 	    return 0;
 	  __is_bucket_begin = false;
 	}
@@ -1560,7 +1554,7 @@
 	  ++__result;
 	  if (!__next_n)
 	    break;
-	  __next_bkt = this->_M_bucket_index(__next_n, _M_bucket_count);
+	  __next_bkt = _M_bucket_index(__next_n);
 	}
       while (__next_bkt == __bkt && this->_M_compare(__k, __code, __next_n));
 
@@ -1591,7 +1585,7 @@
       if (__n == __last_n)
 	return iterator(__n);
 
-      std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count);
+      std::size_t __bkt = _M_bucket_index(__n);
 
       _Node* __prev_n = _M_get_previous_node(__bkt, __n);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
@@ -1606,7 +1600,7 @@
 	      --_M_element_count;
 	      if (!__n)
 		break;
-	      __n_bkt = this->_M_bucket_index(__n, _M_bucket_count);
+	      __n_bkt = _M_bucket_index(__n);
 	    }
 	  while (__n != __last_n && __n_bkt == __bkt);
 	  if (__is_bucket_begin)
@@ -1672,7 +1666,7 @@
 	  while (__p)
 	    {
 	      _Node* __next = __p->_M_next;
-	      std::size_t __new_index = this->_M_bucket_index(__p, __n);
+	      std::size_t __new_index = _HCBase::_M_bucket_index(__p, __n);
 	      if (!__new_buckets[__new_index])
 		// Store temporarily bucket end node in _M_buckets if possible.
 		// This will boost second loop where we need to access bucket
Index: include/bits/hashtable_policy.h
===================================================================
--- include/bits/hashtable_policy.h	(revision 182187)
+++ include/bits/hashtable_policy.h	(working copy)
@@ -101,8 +101,7 @@
 	  _M_next() { }
     };
 
-  // Local iterators, used to iterate within a bucket but not between
-  // buckets.
+  // Node iterators, used to iterate through all the hashtable.
   template<typename _Value, bool __cache>
     struct _Node_iterator_base
     {
@@ -218,6 +217,146 @@
       }
     };
 
+  // Local iterators, used to iterate within a bucket but not between
+  // buckets.
+  template<typename _Hashtable, typename _Value, bool __cache>
+    struct _Local_iterator_base
+    {
+      _Local_iterator_base(const _Hashtable* __hashtable,
+			   _Hash_node<_Value, __cache>* __p,
+			   std::size_t __bkt)
+      : _M_hashtable(__hashtable), _M_cur(__p), _M_bucket(__bkt) { }
+
+      void
+      _M_incr()
+      {
+	_M_cur = _M_cur->_M_next;
+	if (_M_cur)
+	  {
+	    std::size_t __bkt = _M_hashtable->_M_bucket_index(_M_cur);
+	    if (__bkt != _M_bucket)
+	      _M_cur = nullptr;
+	  }
+      }
+
+      const _Hashtable* _M_hashtable;
+      _Hash_node<_Value, __cache>*  _M_cur;
+      std::size_t _M_bucket;
+    };
+
+  template<typename _Hashtable, typename _Value, bool __cache>
+    inline bool
+    operator==(const _Local_iterator_base<_Hashtable, _Value, __cache>& __x,
+	       const _Local_iterator_base<_Hashtable, _Value, __cache>& __y)
+    { return __x._M_cur == __y._M_cur; }
+
+  template<typename _Hashtable, typename _Value, bool __cache>
+    inline bool
+    operator!=(const _Local_iterator_base<_Hashtable, _Value, __cache>& __x,
+	       const _Local_iterator_base<_Hashtable, _Value, __cache>& __y)
+    { return __x._M_cur != __y._M_cur; }
+
+  template<typename _Hashtable,
+	   typename _Value, bool __constant_iterators, bool __cache>
+    struct _Local_iterator
+    : public _Local_iterator_base<_Hashtable, _Value, __cache>
+    {
+      typedef _Value                                   value_type;
+      typedef typename std::conditional<__constant_iterators,
+					const _Value*, _Value*>::type
+						       pointer;
+      typedef typename std::conditional<__constant_iterators,
+					const _Value&, _Value&>::type
+						       reference;
+      typedef std::ptrdiff_t                           difference_type;
+      typedef std::forward_iterator_tag                iterator_category;
+
+      _Local_iterator()
+      : _Local_iterator_base<_Hashtable, _Value, __cache>(nullptr, nullptr, 0)
+      { }
+
+      explicit
+      _Local_iterator(const _Hashtable* __hashtable,
+		      _Hash_node<_Value, __cache>* __p,
+		      std::size_t __bkt)
+      : _Local_iterator_base<_Hashtable, _Value, __cache>(__hashtable,
+							  __p, __bkt) { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v; }
+
+      pointer
+      operator->() const
+      { return std::__addressof(this->_M_cur->_M_v); }
+
+      _Local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Local_iterator
+      operator++(int)
+      {
+	_Local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  template<typename _Hashtable,
+	   typename _Value, bool __constant_iterators, bool __cache>
+    struct _Local_const_iterator
+    : public _Local_iterator_base<_Hashtable, _Value, __cache>
+    {
+      typedef _Value                                   value_type;
+      typedef const _Value*                            pointer;
+      typedef const _Value&                            reference;
+      typedef std::ptrdiff_t                           difference_type;
+      typedef std::forward_iterator_tag                iterator_category;
+
+      _Local_const_iterator()
+      : _Local_iterator_base<_Hashtable, _Value, __cache>(nullptr, nullptr, 0)
+      { }
+
+      explicit
+      _Local_const_iterator(const _Hashtable* __hashtable,
+			    _Hash_node<_Value, __cache>* __p,
+			    std::size_t __bkt)
+      : _Local_iterator_base<_Hashtable, _Value, __cache>(__hashtable,
+							  __p, __bkt) { }
+
+      _Local_const_iterator(const _Local_iterator<_Hashtable, _Value,
+			    __constant_iterators, __cache>& __x)
+      : _Local_iterator_base<_Hashtable, _Value, __cache>(__x._M_hashtable,
+					      __x._M_cur, __x._M_bucket) { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v; }
+
+      pointer
+      operator->() const
+      { return std::__addressof(this->_M_cur->_M_v); }
+
+      _Local_const_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Local_const_iterator
+      operator++(int)
+      {
+	_Local_const_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
   // Many of class template _Hashtable's template parameters are policy
   // classes.  These are defaults for the policies.
 
@@ -425,8 +564,7 @@
     {
       _Hashtable* __h = static_cast<_Hashtable*>(this);
       typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code,
-					     __h->_M_bucket_count);
+      std::size_t __n = __h->_M_bucket_index(__k, __code);
 
       typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
       if (!__p)
@@ -443,8 +581,7 @@
     {
       _Hashtable* __h = static_cast<_Hashtable*>(this);
       typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code,
-					     __h->_M_bucket_count);
+      std::size_t __n = __h->_M_bucket_index(__k, __code);
 
       typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
       if (!__p)
@@ -462,8 +599,7 @@
     {
       _Hashtable* __h = static_cast<_Hashtable*>(this);
       typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code,
-					     __h->_M_bucket_count);
+      std::size_t __n = __h->_M_bucket_index(__k, __code);
 
       typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
       if (!__p)
@@ -479,8 +615,7 @@
     {
       const _Hashtable* __h = static_cast<const _Hashtable*>(this);
       typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code,
-					     __h->_M_bucket_count);
+      std::size_t __n = __h->_M_bucket_index(__k, __code);
 
       typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
       if (!__p)

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]