This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[v3] mt_allocator: trivial changes + dealloc loop speedup


Hi,

the patch below just consistently uglifies all the names, does a few
trivial cleanups, and improves the performance of the deallocation
loop (which runs under a lock!).

Tested check/check-performance on x86-linux.

Paolo.

////////////////
2004-03-27  Paolo Carlini  <pcarlini@suse.de>

	* include/ext/mt_allocator.h: Uglify consistently names of
	variables, members and classes; tidy.

2004-03-27  Dhruv Matani  <dhruvbird@gmx.net>

	* include/ext/mt_allocator.h (__mt_alloc<>::deallocate):
	Rewrite the deallocation loop.
diff -prN libstdc++-v3-orig/include/ext/mt_allocator.h libstdc++-v3/include/ext/mt_allocator.h
*** libstdc++-v3-orig/include/ext/mt_allocator.h	Fri Mar 26 17:25:19 2004
--- libstdc++-v3/include/ext/mt_allocator.h	Sat Mar 27 01:32:29 2004
*************** namespace __gnu_cxx
*** 57,69 ****
      class __mt_alloc
      {
      public:
!       typedef size_t     size_type;
!       typedef ptrdiff_t  difference_type;
!       typedef _Tp*       pointer;
!       typedef const _Tp* const_pointer;
!       typedef _Tp&       reference;
!       typedef const _Tp& const_reference;
!       typedef _Tp        value_type;
  
        template<typename _Tp1>
          struct rebind
--- 57,69 ----
      class __mt_alloc
      {
      public:
!       typedef size_t                    size_type;
!       typedef ptrdiff_t                 difference_type;
!       typedef _Tp*                      pointer;
!       typedef const _Tp*                const_pointer;
!       typedef _Tp&                      reference;
!       typedef const _Tp&                const_reference;
!       typedef _Tp                       value_type;
  
        template<typename _Tp1>
          struct rebind
*************** namespace __gnu_cxx
*** 88,97 ****
        ~__mt_alloc() throw() { }
  
        pointer
!       address(reference __x) const { return &__x; }
  
        const_pointer
!       address(const_reference __x) const { return &__x; }
  
        size_type
        max_size() const throw() 
--- 88,99 ----
        ~__mt_alloc() throw() { }
  
        pointer
!       address(reference __x) const
!       { return &__x; }
  
        const_pointer
!       address(const_reference __x) const
!       { return &__x; }
  
        size_type
        max_size() const throw() 
*************** namespace __gnu_cxx
*** 107,120 ****
        destroy(pointer __p) { __p->~_Tp(); }
  
        pointer
!       allocate(size_t __n, const void* = 0);
  
        void
        deallocate(pointer __p, size_type __n);
  
        // Variables used to configure the behavior of the allocator,
        // assigned and explained in detail below.
!       struct tune
        {
  	// Allocation requests (after round-up to power of 2) below
  	// this value will be handled by the allocator. A raw new/
--- 109,122 ----
        destroy(pointer __p) { __p->~_Tp(); }
  
        pointer
!       allocate(size_type __n, const void* = 0);
  
        void
        deallocate(pointer __p, size_type __n);
  
        // Variables used to configure the behavior of the allocator,
        // assigned and explained in detail below.
!       struct _Tune
        {
  	// Allocation requests (after round-up to power of 2) below
  	// this value will be handled by the allocator. A raw new/
*************** namespace __gnu_cxx
*** 146,152 ****
  	// Set to true forces all allocations to use new().
  	bool 	_M_force_new; 
       
! 	explicit tune() 
  	: _M_max_bytes(128), _M_min_bin(8),
  	  _M_chunk_size(4096 - 4 * sizeof(void*)), 
  #ifdef __GTHREADS
--- 148,154 ----
  	// Set to true forces all allocations to use new().
  	bool 	_M_force_new; 
       
! 	explicit _Tune()
  	: _M_max_bytes(128), _M_min_bin(8),
  	  _M_chunk_size(4096 - 4 * sizeof(void*)), 
  #ifdef __GTHREADS
*************** namespace __gnu_cxx
*** 158,165 ****
  	  _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false) 
  	{ }      
  
! 	explicit tune(size_t __maxb, size_t __minbin, size_t __chunk,
! 		      size_t __maxthreads, size_t __headroom, bool __force) 
  	: _M_max_bytes(__maxb), _M_min_bin(__minbin), _M_chunk_size(__chunk), 
  	  _M_max_threads(__maxthreads), _M_freelist_headroom(__headroom), 
  	  _M_force_new(__force)
--- 160,167 ----
  	  _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false) 
  	{ }      
  
! 	explicit _Tune(size_t __maxb, size_t __minbin, size_t __chunk,
! 		       size_t __maxthreads, size_t __headroom, bool __force) 
  	: _M_max_bytes(__maxb), _M_min_bin(__minbin), _M_chunk_size(__chunk), 
  	  _M_max_threads(__maxthreads), _M_freelist_headroom(__headroom), 
  	  _M_force_new(__force)
*************** namespace __gnu_cxx
*** 174,190 ****
  #endif
        static bool 			_S_init;
  
!       static void 
        _S_initialize();
  
        // Configuration options.
!       static tune 	       		_S_options;
  
!       static const tune
!       _S_get_options() { return _S_options; }
  
        static void
!       _S_set_options(tune __t)
        { 
  	if (!_S_init)
  	  _S_options = __t;
--- 176,193 ----
  #endif
        static bool 			_S_init;
  
!       static void
        _S_initialize();
  
        // Configuration options.
!       static _Tune 	       		_S_options;
  
!       static const _Tune
!       _S_get_options()
!       { return _S_options; }
  
        static void
!       _S_set_options(_Tune __t)
        { 
  	if (!_S_init)
  	  _S_options = __t;
*************** namespace __gnu_cxx
*** 192,199 ****
  
        // Using short int as type for the binmap implies we are never
        // caching blocks larger than 65535 with this allocator
!       typedef unsigned short int binmap_type;
!       static binmap_type* 		_S_binmap;
  
        // Each requesting thread is assigned an id ranging from 1 to
        // _S_max_threads. Thread id 0 is used as a global memory pool.
--- 195,202 ----
  
        // Using short int as type for the binmap implies we are never
        // caching blocks larger than 65535 with this allocator
!       typedef unsigned short int        _Binmap_type;
!       static _Binmap_type* 		_S_binmap;
  
        // Each requesting thread is assigned an id ranging from 1 to
        // _S_max_threads. Thread id 0 is used as a global memory pool.
*************** namespace __gnu_cxx
*** 205,267 ****
        // (i.e. the thread dies) is called, we return the thread id to
        // the front of this list.
  #ifdef __GTHREADS
!       struct thread_record
        {
          // Points to next free thread id record. NULL if last record in list.
!         thread_record* volatile next;
  
  	// Thread id ranging from 1 to _S_max_threads.
!         size_t id;
        };
  
!       static thread_record* volatile 	_S_thread_freelist_first;
        static __gthread_mutex_t 		_S_thread_freelist_mutex;
        static __gthread_key_t 		_S_thread_key;
  
        static void 
!       _S_destroy_thread_key(void* freelist_pos);
  #endif
  
        static size_t 
        _S_get_thread_id();
  
!       union block_record
        {
  	// Points to the next block_record for its thread_id.
!         block_record* volatile next;
  
  	// The thread id of the thread which has requested this block.
  #ifdef __GTHREADS
!         size_t thread_id;
  #endif
        };
  
!       struct bin_record
        {
  	// An "array" of pointers to the first free block for each
  	// thread id. Memory to this "array" is allocated in _S_initialize()
  	// for _S_max_threads + global pool 0.
!         block_record** volatile first;
  
  	// An "array" of counters used to keep track of the amount of
  	// blocks that are on the freelist/used for each thread id.
  	// Memory to these "arrays" is allocated in _S_initialize() for
  	// _S_max_threads + global pool 0.
!         size_t* volatile free;
!         size_t* volatile used;
  
  	// Each bin has its own mutex which is used to ensure data
  	// integrity while changing "ownership" on a block.  The mutex
  	// is initialized in _S_initialize().
  #ifdef __GTHREADS
!         __gthread_mutex_t* mutex;
  #endif
        };
  
        // An "array" of bin_records each of which represents a specific
        // power of 2 size. Memory to this "array" is allocated in
        // _S_initialize().
!       static bin_record* volatile     	_S_bin;
  
        // Actual value calculated in _S_initialize().
        static size_t 	       	     	_S_bin_size; 
--- 208,270 ----
        // (i.e. the thread dies) is called, we return the thread id to
        // the front of this list.
  #ifdef __GTHREADS
!       struct _Thread_record
        {
          // Points to next free thread id record. NULL if last record in list.
!         _Thread_record* volatile        _M_next;
  
  	// Thread id ranging from 1 to _S_max_threads.
!         size_t                          _M_id;
        };
  
!       static _Thread_record* volatile 	_S_thread_freelist_first;
        static __gthread_mutex_t 		_S_thread_freelist_mutex;
        static __gthread_key_t 		_S_thread_key;
  
        static void 
!       _S_destroy_thread_key(void* __freelist_pos);
  #endif
  
        static size_t 
        _S_get_thread_id();
  
!       union _Block_record
        {
  	// Points to the next block_record for its thread_id.
!         _Block_record* volatile         _M_next;
  
  	// The thread id of the thread which has requested this block.
  #ifdef __GTHREADS
!         size_t                          _M_thread_id;
  #endif
        };
  
!       struct _Bin_record
        {
  	// An "array" of pointers to the first free block for each
  	// thread id. Memory to this "array" is allocated in _S_initialize()
  	// for _S_max_threads + global pool 0.
!         _Block_record** volatile        _M_first;
  
  	// An "array" of counters used to keep track of the amount of
  	// blocks that are on the freelist/used for each thread id.
  	// Memory to these "arrays" is allocated in _S_initialize() for
  	// _S_max_threads + global pool 0.
!         size_t* volatile                _M_free;
!         size_t* volatile                _M_used;
  
  	// Each bin has its own mutex which is used to ensure data
  	// integrity while changing "ownership" on a block.  The mutex
  	// is initialized in _S_initialize().
  #ifdef __GTHREADS
!         __gthread_mutex_t*              _M_mutex;
  #endif
        };
  
        // An "array" of bin_records each of which represents a specific
        // power of 2 size. Memory to this "array" is allocated in
        // _S_initialize().
!       static _Bin_record* volatile     	_S_bin;
  
        // Actual value calculated in _S_initialize().
        static size_t 	       	     	_S_bin_size; 
*************** namespace __gnu_cxx
*** 270,276 ****
    template<typename _Tp>
      typename __mt_alloc<_Tp>::pointer
      __mt_alloc<_Tp>::
!     allocate(size_t __n, const void*)
      {
        // Although the test in __gthread_once() would suffice, we wrap
        // test of the once condition in our own unlocked check. This
--- 273,279 ----
    template<typename _Tp>
      typename __mt_alloc<_Tp>::pointer
      __mt_alloc<_Tp>::
!     allocate(size_type __n, const void*)
      {
        // Although the test in __gthread_once() would suffice, we wrap
        // test of the once condition in our own unlocked check. This
*************** namespace __gnu_cxx
*** 295,314 ****
  	  void* __ret = ::operator new(__bytes);
  	  return static_cast<_Tp*>(__ret);
  	}
!       
        // Round up to power of 2 and figure out which bin to use.
        const size_t __which = _S_binmap[__bytes];      
        const size_t __thread_id = _S_get_thread_id();
        
        // Find out if we have blocks on our freelist.  If so, go ahead
        // and use them directly without having to lock anything.
!       const bin_record& __bin = _S_bin[__which];
!       block_record* block = NULL;
!       if (__bin.first[__thread_id] == NULL)
  	{
  	  // Are we using threads?
  	  // - Yes, check if there are free blocks on the global
! 	  //   list. If so, grab up to block_count blocks in one
  	  //   lock and change ownership. If the global list is 
  	  //   empty, we allocate a new chunk and add those blocks 
  	  //   directly to our own freelist (with us as owner).
--- 298,321 ----
  	  void* __ret = ::operator new(__bytes);
  	  return static_cast<_Tp*>(__ret);
  	}
! 
        // Round up to power of 2 and figure out which bin to use.
        const size_t __which = _S_binmap[__bytes];      
        const size_t __thread_id = _S_get_thread_id();
        
        // Find out if we have blocks on our freelist.  If so, go ahead
        // and use them directly without having to lock anything.
!       const _Bin_record& __bin = _S_bin[__which];
!       _Block_record* __block = NULL;
!       if (__bin._M_first[__thread_id] == NULL)
  	{
+ 	  const size_t __bin_size = ((_S_options._M_min_bin << __which)
+ 				     + sizeof(_Block_record));
+ 	  size_t __block_count = _S_options._M_chunk_size / __bin_size;	  
+ 
  	  // Are we using threads?
  	  // - Yes, check if there are free blocks on the global
! 	  //   list. If so, grab up to __block_count blocks in one
  	  //   lock and change ownership. If the global list is 
  	  //   empty, we allocate a new chunk and add those blocks 
  	  //   directly to our own freelist (with us as owner).
*************** namespace __gnu_cxx
*** 319,425 ****
  #ifdef __GTHREADS
  	  if (__gthread_active_p())
  	    {
! 	      const size_t bin_size = ((_S_options._M_min_bin << __which)
! 				       + sizeof(block_record));
! 	      size_t block_count = _S_options._M_chunk_size / bin_size;
! 	      
! 	      __gthread_mutex_lock(__bin.mutex);	      
! 	      if (__bin.first[0] == NULL)
  		{
  		  // No need to hold the lock when we are adding a
  		  // whole chunk to our own list.
! 		  __gthread_mutex_unlock(__bin.mutex);
  		  
! 		  void* v = ::operator new(_S_options._M_chunk_size);
! 		  __bin.first[__thread_id] = static_cast<block_record*>(v);
! 		  
! 		  __bin.free[__thread_id] = block_count;		  
! 		  block_count--;
! 		  block = __bin.first[__thread_id];
! 		  
! 		  while (block_count > 0)
  		    {
! 		      char* c = reinterpret_cast<char*>(block) + bin_size;
! 		      block->next = reinterpret_cast<block_record*>(c);
! 		      block = block->next;
! 		      block_count--;
  		    }
! 		  block->next = NULL;
  		}
  	      else
  		{
! 		  size_t global_count = 0;		  
! 		  block_record* tmp;		  
! 		  while (__bin.first[0] != NULL && global_count < block_count)
  		    {
! 		      tmp = __bin.first[0]->next;
! 		      block = __bin.first[0];
  
! 		      block->next = __bin.first[__thread_id];
! 		      __bin.first[__thread_id] = block;		      
  		      
! 		      __bin.free[__thread_id]++;
! 		      __bin.first[0] = tmp;
! 		      global_count++;
  		    }
! 		  __gthread_mutex_unlock(__bin.mutex);
  		}
  	      
  	      // Return the first newly added block in our list and
  	      // update the counters
! 	      block = __bin.first[__thread_id];
! 	      __bin.first[__thread_id] = __bin.first[__thread_id]->next; 
! 	      block->thread_id = __thread_id;
! 	      __bin.free[__thread_id]--;
! 	      __bin.used[__thread_id]++;
  	    }
  	  else
  #endif
  	    {
  	      void* __v = ::operator new(_S_options._M_chunk_size);
! 	      __bin.first[0] = static_cast<block_record*>(__v);
! 	      
! 	      const size_t bin_size = ((_S_options._M_min_bin << __which)
! 				       + sizeof(block_record));
! 	      size_t block_count = _S_options._M_chunk_size / bin_size;
  	      
! 	      block_count--;
! 	      block = __bin.first[0];
! 	      while (block_count > 0)
  		{
! 		  char* __c = reinterpret_cast<char*>(block) + bin_size;
! 		  block->next = reinterpret_cast<block_record*>(__c);
! 		  block = block->next;
! 		  block_count--;
  		}
! 	      block->next = NULL;
  	      
  	      // Remove from list.
! 	      block = __bin.first[0];
! 	      __bin.first[0] = __bin.first[0]->next;
  	    }
  	}
        else
  	{
  	  // "Default" operation - we have blocks on our own freelist
  	  // grab the first record and update the counters.
! 	  block = __bin.first[__thread_id];
! 	  __bin.first[__thread_id] = __bin.first[__thread_id]->next;
  
  #ifdef __GTHREADS
- 	  block->thread_id = __thread_id;
  	  if (__gthread_active_p())
  	    {
! 	      __bin.free[__thread_id]--;
! 	      __bin.used[__thread_id]++;
  	    }
  #endif
  	}
!       char* __c = reinterpret_cast<char*>(block) + sizeof(block_record);
        return static_cast<_Tp*>(static_cast<void*>(__c));
      }
    
- 
    template<typename _Tp>
      void
      __mt_alloc<_Tp>::
--- 326,420 ----
  #ifdef __GTHREADS
  	  if (__gthread_active_p())
  	    {
! 	      __gthread_mutex_lock(__bin._M_mutex);
! 	      if (__bin._M_first[0] == NULL)
  		{
  		  // No need to hold the lock when we are adding a
  		  // whole chunk to our own list.
! 		  __gthread_mutex_unlock(__bin._M_mutex);
  		  
! 		  void* __v = ::operator new(_S_options._M_chunk_size);
! 		  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
! 		  __bin._M_free[__thread_id] = __block_count;		  
! 
! 		  --__block_count;
! 		  __block = __bin._M_first[__thread_id];
! 		  while (__block_count > 0)
  		    {
! 		      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
! 		      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
! 		      __block = __block->_M_next;
! 		      --__block_count;
  		    }
! 		  __block->_M_next = NULL;
  		}
  	      else
  		{
! 		  while (__bin._M_first[0] != NULL && __block_count > 0)
  		    {
! 		      _Block_record* __tmp = __bin._M_first[0]->_M_next;
! 		      __block = __bin._M_first[0];
  
! 		      __block->_M_next = __bin._M_first[__thread_id];
! 		      __bin._M_first[__thread_id] = __block;		      
  		      
! 		      ++__bin._M_free[__thread_id];
! 		      __bin._M_first[0] = __tmp;
! 		      --__block_count;
  		    }
! 		  __gthread_mutex_unlock(__bin._M_mutex);
  		}
  	      
  	      // Return the first newly added block in our list and
  	      // update the counters
! 	      __block = __bin._M_first[__thread_id];
! 	      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
! 	      __block->_M_thread_id = __thread_id;
! 	      --__bin._M_free[__thread_id];
! 	      ++__bin._M_used[__thread_id];
  	    }
  	  else
  #endif
  	    {
  	      void* __v = ::operator new(_S_options._M_chunk_size);
! 	      __bin._M_first[0] = static_cast<_Block_record*>(__v);
  	      
! 	      --__block_count;
! 	      __block = __bin._M_first[0];
! 	      while (__block_count > 0)
  		{
! 		  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
! 		  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
! 		  __block = __block->_M_next;
! 		  --__block_count;
  		}
! 	      __block->_M_next = NULL;
  	      
  	      // Remove from list.
! 	      __block = __bin._M_first[0];
! 	      __bin._M_first[0] = __bin._M_first[0]->_M_next;
  	    }
  	}
        else
  	{
  	  // "Default" operation - we have blocks on our own freelist
  	  // grab the first record and update the counters.
! 	  __block = __bin._M_first[__thread_id];
! 	  __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
  
  #ifdef __GTHREADS
  	  if (__gthread_active_p())
  	    {
! 	      __block->_M_thread_id = __thread_id;
! 	      --__bin._M_free[__thread_id];
! 	      ++__bin._M_used[__thread_id];
  	    }
  #endif
  	}
!       char* __c = reinterpret_cast<char*>(__block) + sizeof(_Block_record);
        return static_cast<_Tp*>(static_cast<void*>(__c));
      }
    
    template<typename _Tp>
      void
      __mt_alloc<_Tp>::
*************** namespace __gnu_cxx
*** 436,493 ****
        
        // Round up to power of 2 and figure out which bin to use.
        const size_t __which = _S_binmap[__bytes];
!       const bin_record& __bin = _S_bin[__which];
  
!       char* __c = reinterpret_cast<char*>(__p) - sizeof(block_record);
!       block_record* block = reinterpret_cast<block_record*>(__c);
        
  #ifdef __GTHREADS
-       const size_t thread_id = _S_get_thread_id();
        if (__gthread_active_p())
  	{
  	  // Calculate the number of records to remove from our freelist.
! 	  int remove = __bin.free[thread_id] -
! 	    (__bin.used[thread_id] / _S_options._M_freelist_headroom);
  
  	  // The calculation above will almost always tell us to
  	  // remove one or two records at a time, but this creates too
  	  // much contention when locking and therefore we wait until
  	  // the number of records is "high enough".
  	  int __cond1 = static_cast<int>(100 * (_S_bin_size - __which));
! 	  int __cond2 = static_cast<int>(__bin.free[thread_id]
  					 / _S_options._M_freelist_headroom);
! 	  if (remove > __cond1 && remove > __cond2)
  	    {
! 	      __gthread_mutex_lock(__bin.mutex);
! 	      block_record* tmp;
! 	      while (remove > 0)
  		{
! 		  tmp = __bin.first[thread_id]->next;
! 		  __bin.first[thread_id]->next = __bin.first[0];
! 		  __bin.first[0] = __bin.first[thread_id];
! 		  
! 		  __bin.first[thread_id] = tmp;
! 		  __bin.free[thread_id]--;
! 		  remove--;
  		}
! 	      __gthread_mutex_unlock(__bin.mutex);
  	    }
  	  
  	  // Return this block to our list and update counters and
  	  // owner id as needed.
! 	  __bin.used[block->thread_id]--;
  
! 	  block->next = __bin.first[thread_id];
! 	  __bin.first[thread_id] = block;
  	  
! 	  __bin.free[thread_id]++;
  	}
        else
  #endif
  	{
  	  // Single threaded application - return to global pool.
! 	  block->next = __bin.first[0];
! 	  __bin.first[0] = block;
  	}
      }
    
--- 431,490 ----
        
        // Round up to power of 2 and figure out which bin to use.
        const size_t __which = _S_binmap[__bytes];
!       const _Bin_record& __bin = _S_bin[__which];
  
!       char* __c = reinterpret_cast<char*>(__p) - sizeof(_Block_record);
!       _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        
  #ifdef __GTHREADS
        if (__gthread_active_p())
  	{
  	  // Calculate the number of records to remove from our freelist.
! 	  const size_t __thread_id = _S_get_thread_id();
! 	  int __remove = (__bin._M_free[__thread_id]
! 			  - (__bin._M_used[__thread_id]
! 			     / _S_options._M_freelist_headroom));
  
  	  // The calculation above will almost always tell us to
  	  // remove one or two records at a time, but this creates too
  	  // much contention when locking and therefore we wait until
  	  // the number of records is "high enough".
  	  int __cond1 = static_cast<int>(100 * (_S_bin_size - __which));
! 	  int __cond2 = static_cast<int>(__bin._M_free[__thread_id]
  					 / _S_options._M_freelist_headroom);
! 	  if (__remove > __cond1 && __remove > __cond2)
  	    {
! 	      __gthread_mutex_lock(__bin._M_mutex);
! 	      _Block_record* __tmp = __bin._M_first[__thread_id];
! 	      _Block_record* __first = __tmp;
! 	      const int __removed = __remove;
! 	      while (__remove > 1)
  		{
! 		  __tmp = __tmp->_M_next;
! 		  --__remove;
  		}
! 	      __bin._M_first[__thread_id] = __tmp->_M_next;
! 	      __tmp->_M_next = __bin._M_first[0];
! 	      __bin._M_first[0] = __first;
! 	      __bin._M_free[__thread_id] -= __removed;
! 	      __gthread_mutex_unlock(__bin._M_mutex);
  	    }
  	  
  	  // Return this block to our list and update counters and
  	  // owner id as needed.
! 	  --__bin._M_used[__block->_M_thread_id];
  
! 	  __block->_M_next = __bin._M_first[__thread_id];
! 	  __bin._M_first[__thread_id] = __block;
  	  
! 	  ++__bin._M_free[__thread_id];
  	}
        else
  #endif
  	{
  	  // Single threaded application - return to global pool.
! 	  __block->_M_next = __bin._M_first[0];
! 	  __bin._M_first[0] = __block;
  	}
      }
    
*************** namespace __gnu_cxx
*** 505,526 ****
        while (_S_options._M_max_bytes > __bin_size)
  	{
  	  __bin_size <<= 1;
! 	  _S_bin_size++;
  	}
  
        // Setup the bin map for quick lookup of the relevant bin.
!       const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(binmap_type);
!       _S_binmap = static_cast<binmap_type*>(::operator new(__j));
  
!       binmap_type* __bp = _S_binmap;
!       binmap_type __bin_max = _S_options._M_min_bin;
!       binmap_type __bint = 0;
!       for (binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; __ct++)
          {
            if (__ct > __bin_max)
              {
                __bin_max <<= 1;
!               __bint++;
              }
            *__bp++ = __bint;
          }
--- 502,523 ----
        while (_S_options._M_max_bytes > __bin_size)
  	{
  	  __bin_size <<= 1;
! 	  ++_S_bin_size;
  	}
  
        // Setup the bin map for quick lookup of the relevant bin.
!       const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
!       _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));
  
!       _Binmap_type* __bp = _S_binmap;
!       _Binmap_type __bin_max = _S_options._M_min_bin;
!       _Binmap_type __bint = 0;
!       for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
          {
            if (__ct > __bin_max)
              {
                __bin_max <<= 1;
!               ++__bint;
              }
            *__bp++ = __bint;
          }
*************** namespace __gnu_cxx
*** 532,555 ****
  #ifdef __GTHREADS
        if (__gthread_active_p())
          {
! 	  const size_t __k = sizeof(thread_record) * _S_options._M_max_threads;
  	  __v = ::operator new(__k);
!           _S_thread_freelist_first = static_cast<thread_record*>(__v);
  
  	  // NOTE! The first assignable thread id is 1 since the
  	  // global pool uses id 0
            size_t __i;
!           for (__i = 1; __i < _S_options._M_max_threads; __i++)
              {
! 	      thread_record& __tr = _S_thread_freelist_first[__i - 1];
!               __tr.next = &_S_thread_freelist_first[__i];
!               __tr.id = __i;
              }
  
            // Set last record.
!           _S_thread_freelist_first[__i - 1].next = NULL;
!           _S_thread_freelist_first[__i - 1].id = __i;
! 
  
  	  // Make sure this is initialized.
  #ifndef __GTHREAD_MUTEX_INIT
--- 529,551 ----
  #ifdef __GTHREADS
        if (__gthread_active_p())
          {
! 	  const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
  	  __v = ::operator new(__k);
!           _S_thread_freelist_first = static_cast<_Thread_record*>(__v);
  
  	  // NOTE! The first assignable thread id is 1 since the
  	  // global pool uses id 0
            size_t __i;
!           for (__i = 1; __i < _S_options._M_max_threads; ++__i)
              {
! 	      _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
!               __tr._M_next = &_S_thread_freelist_first[__i];
!               __tr._M_id = __i;
              }
  
            // Set last record.
!           _S_thread_freelist_first[__i - 1]._M_next = NULL;
!           _S_thread_freelist_first[__i - 1]._M_id = __i;
  
  	  // Make sure this is initialized.
  #ifndef __GTHREAD_MUTEX_INIT
*************** namespace __gnu_cxx
*** 562,569 ****
  #endif
  
        // Initialize _S_bin and its members.
!       __v = ::operator new(sizeof(bin_record) * _S_bin_size);
!       _S_bin = static_cast<bin_record*>(__v);
  	
        // Maximum number of threads. 
        size_t __max_threads = 1;
--- 558,565 ----
  #endif
  
        // Initialize _S_bin and its members.
!       __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
!       _S_bin = static_cast<_Bin_record*>(__v);
  	
        // Maximum number of threads. 
        size_t __max_threads = 1;
*************** namespace __gnu_cxx
*** 572,615 ****
          __max_threads = _S_options._M_max_threads + 1;
  #endif
  
!       for (size_t __n = 0; __n < _S_bin_size; __n++)
          {
! 	  bin_record& __bin = _S_bin[__n];
! 	  __v = ::operator new(sizeof(block_record*) * __max_threads);
!           __bin.first = static_cast<block_record**>(__v);
  
  #ifdef __GTHREADS
            if (__gthread_active_p())
              {
  	      __v = ::operator new(sizeof(size_t) * __max_threads);
!               __bin.free = static_cast<size_t*>(__v);
  
  	      __v = ::operator new(sizeof(size_t) * __max_threads);
!               __bin.used = static_cast<size_t*>(__v);
  
  	      __v = ::operator new(sizeof(__gthread_mutex_t));
!               __bin.mutex = static_cast<__gthread_mutex_t*>(__v);
  
  #ifdef __GTHREAD_MUTEX_INIT
                {
                  // Do not copy a POSIX/gthr mutex once in use.
                  __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
!                 *__bin.mutex = __tmp;
                }
  #else
!               { __GTHREAD_MUTEX_INIT_FUNCTION(__bin.mutex); }
  #endif
              }
  #endif
  
!           for (size_t __threadn = 0; __threadn < __max_threads; __threadn++)
              {
!               __bin.first[__threadn] = NULL;
  #ifdef __GTHREADS
                if (__gthread_active_p())
                  {
!                   __bin.free[__threadn] = 0;
!                   __bin.used[__threadn] = 0;
                  }
  #endif
              }
--- 568,611 ----
          __max_threads = _S_options._M_max_threads + 1;
  #endif
  
!       for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
! 	  _Bin_record& __bin = _S_bin[__n];
! 	  __v = ::operator new(sizeof(_Block_record*) * __max_threads);
!           __bin._M_first = static_cast<_Block_record**>(__v);
  
  #ifdef __GTHREADS
            if (__gthread_active_p())
              {
  	      __v = ::operator new(sizeof(size_t) * __max_threads);
!               __bin._M_free = static_cast<size_t*>(__v);
  
  	      __v = ::operator new(sizeof(size_t) * __max_threads);
!               __bin._M_used = static_cast<size_t*>(__v);
  
  	      __v = ::operator new(sizeof(__gthread_mutex_t));
!               __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
  
  #ifdef __GTHREAD_MUTEX_INIT
                {
                  // Do not copy a POSIX/gthr mutex once in use.
                  __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
!                 *__bin._M_mutex = __tmp;
                }
  #else
!               { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
  #endif
              }
  #endif
  
!           for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
!               __bin._M_first[__threadn] = NULL;
  #ifdef __GTHREADS
                if (__gthread_active_p())
                  {
!                   __bin._M_free[__threadn] = 0;
!                   __bin._M_used[__threadn] = 0;
                  }
  #endif
              }
*************** namespace __gnu_cxx
*** 624,636 ****
      {
  #ifdef __GTHREADS
        // If we have thread support and it's active we check the thread
!       // key value and return it's id or if it's not set we take the
        // first record from _S_thread_freelist and sets the key and
        // returns it's id.
        if (__gthread_active_p())
          {
!           thread_record* __freelist_pos =
! 	    static_cast<thread_record*>(__gthread_getspecific(_S_thread_key)); 
  	  if (__freelist_pos == NULL)
              {
  	      // Since _S_options._M_max_threads must be larger than
--- 620,632 ----
      {
  #ifdef __GTHREADS
        // If we have thread support and it's active we check the thread
!       // key value and return its id or if it's not set we take the
        // first record from _S_thread_freelist and sets the key and
        // returns it's id.
        if (__gthread_active_p())
          {
!           _Thread_record* __freelist_pos =
! 	    static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key)); 
  	  if (__freelist_pos == NULL)
              {
  	      // Since _S_options._M_max_threads must be larger than
*************** namespace __gnu_cxx
*** 638,650 ****
  	      // list can never be empty.
                __gthread_mutex_lock(&_S_thread_freelist_mutex);
                __freelist_pos = _S_thread_freelist_first;
!               _S_thread_freelist_first = _S_thread_freelist_first->next;
                __gthread_mutex_unlock(&_S_thread_freelist_mutex);
  
                __gthread_setspecific(_S_thread_key, 
  				    static_cast<void*>(__freelist_pos));
              }
!           return __freelist_pos->id;
          }
  #endif
        // Otherwise (no thread support or inactive) all requests are
--- 634,646 ----
  	      // list can never be empty.
                __gthread_mutex_lock(&_S_thread_freelist_mutex);
                __freelist_pos = _S_thread_freelist_first;
!               _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
                __gthread_mutex_unlock(&_S_thread_freelist_mutex);
  
                __gthread_setspecific(_S_thread_key, 
  				    static_cast<void*>(__freelist_pos));
              }
!           return __freelist_pos->_M_id;
          }
  #endif
        // Otherwise (no thread support or inactive) all requests are
*************** namespace __gnu_cxx
*** 660,667 ****
      {
        // Return this thread id record to front of thread_freelist.
        __gthread_mutex_lock(&_S_thread_freelist_mutex);
!       thread_record* __tr = static_cast<thread_record*>(__freelist_pos);
!       __tr->next = _S_thread_freelist_first;
        _S_thread_freelist_first = __tr;
        __gthread_mutex_unlock(&_S_thread_freelist_mutex);
      }
--- 656,663 ----
      {
        // Return this thread id record to front of thread_freelist.
        __gthread_mutex_lock(&_S_thread_freelist_mutex);
!       _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
!       __tr->_M_next = _S_thread_freelist_first;
        _S_thread_freelist_first = __tr;
        __gthread_mutex_unlock(&_S_thread_freelist_mutex);
      }
*************** namespace __gnu_cxx
*** 681,693 ****
      bool __mt_alloc<_Tp>::_S_init = false;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::tune __mt_alloc<_Tp>::_S_options;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::binmap_type* __mt_alloc<_Tp>::_S_binmap;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::bin_record* volatile __mt_alloc<_Tp>::_S_bin;
  
    template<typename _Tp> 
      size_t __mt_alloc<_Tp>::_S_bin_size = 1;
--- 677,689 ----
      bool __mt_alloc<_Tp>::_S_init = false;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;
  
    template<typename _Tp> 
      size_t __mt_alloc<_Tp>::_S_bin_size = 1;
*************** namespace __gnu_cxx
*** 698,704 ****
      __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::thread_record*
      volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;
  
    template<typename _Tp> 
--- 694,700 ----
      __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;
  
    template<typename _Tp> 
!     typename __mt_alloc<_Tp>::_Thread_record*
      volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;
  
    template<typename _Tp> 

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]