mt_allocator.h — source listing

Go to the documentation of this file.
00001 // MT-optimized allocator -*- C++ -*-
00002 
00003 // Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 2, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // You should have received a copy of the GNU General Public License along
00017 // with this library; see the file COPYING.  If not, write to the Free
00018 // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
00019 // USA.
00020 
00021 // As a special exception, you may use this file as part of a free software
00022 // library without restriction.  Specifically, if other files instantiate
00023 // templates or use macros or inline functions from this file, or you compile
00024 // this file and link it with other files to produce an executable, this
00025 // file does not by itself cause the resulting executable to be covered by
00026 // the GNU General Public License.  This exception does not however
00027 // invalidate any other reasons why the executable file might be covered by
00028 // the GNU General Public License.
00029 
00030 /** @file ext/mt_allocator.h
00031  *  This file is a GNU extension to the Standard C++ Library.
00032  */
00033 
00034 #ifndef _MT_ALLOCATOR_H
00035 #define _MT_ALLOCATOR_H 1
00036 
00037 #include <new>
00038 #include <cstdlib>
00039 #include <bits/functexcept.h>
00040 #include <bits/gthr.h>
00041 #include <bits/atomicity.h>
00042 
00043 namespace __gnu_cxx
00044 {
  // Callback type for tearing down per-thread data; specified as the
  // destructor when the __gthread_key is initialized (see the
  // _Thread_record notes below).  XXX GLIBCXX_ABI Deprecated along
  // with _M_initialize(__destroy_handler).
  typedef void (*__destroy_handler)(void*);
00046 
00047   /// @brief  Base class for pool object.
00048   struct __pool_base
00049   {
00050     // Using short int as type for the binmap implies we are never
00051     // caching blocks larger than 32768 with this allocator.
00052     typedef unsigned short int _Binmap_type;
00053 
00054     // Variables used to configure the behavior of the allocator,
00055     // assigned and explained in detail below.
00056     struct _Tune
00057      {
00058       // Compile time constants for the default _Tune values.
00059       enum { _S_align = 8 };
00060       enum { _S_max_bytes = 128 };
00061       enum { _S_min_bin = 8 };
00062       enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
00063       enum { _S_max_threads = 4096 };
00064       enum { _S_freelist_headroom = 10 };
00065 
00066       // Alignment needed.
00067       // NB: In any case must be >= sizeof(_Block_record), that
00068       // is 4 on 32 bit machines and 8 on 64 bit machines.
00069       size_t    _M_align;
00070       
00071       // Allocation requests (after round-up to power of 2) below
00072       // this value will be handled by the allocator. A raw new/
00073       // call will be used for requests larger than this value.
00074       // NB: Must be much smaller than _M_chunk_size and in any
00075       // case <= 32768.
00076       size_t    _M_max_bytes; 
00077 
00078       // Size in bytes of the smallest bin.
00079       // NB: Must be a power of 2 and >= _M_align (and of course
00080       // much smaller than _M_max_bytes).
00081       size_t    _M_min_bin;
00082 
00083       // In order to avoid fragmenting and minimize the number of
00084       // new() calls we always request new memory using this
00085       // value. Based on previous discussions on the libstdc++
00086       // mailing list we have choosen the value below.
00087       // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
00088       // NB: At least one order of magnitude > _M_max_bytes. 
00089       size_t    _M_chunk_size;
00090 
00091       // The maximum number of supported threads. For
00092       // single-threaded operation, use one. Maximum values will
00093       // vary depending on details of the underlying system. (For
00094       // instance, Linux 2.4.18 reports 4070 in
00095       // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
00096       // 65534)
00097       size_t    _M_max_threads;
00098 
00099       // Each time a deallocation occurs in a threaded application
00100       // we make sure that there are no more than
00101       // _M_freelist_headroom % of used memory on the freelist. If
00102       // the number of additional records is more than
00103       // _M_freelist_headroom % of the freelist, we move these
00104       // records back to the global pool.
00105       size_t    _M_freelist_headroom;
00106       
00107       // Set to true forces all allocations to use new().
00108       bool  _M_force_new; 
00109       
00110       explicit
00111       _Tune()
00112       : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
00113       _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads), 
00114       _M_freelist_headroom(_S_freelist_headroom), 
00115       _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
00116       { }
00117 
00118       explicit
00119       _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk, 
00120         size_t __maxthreads, size_t __headroom, bool __force) 
00121       : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
00122       _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
00123       _M_freelist_headroom(__headroom), _M_force_new(__force)
00124       { }
00125     };
00126     
00127     struct _Block_address
00128     {
00129       void*             _M_initial;
00130       _Block_address*       _M_next;
00131     };
00132     
00133     const _Tune&
00134     _M_get_options() const
00135     { return _M_options; }
00136 
00137     void
00138     _M_set_options(_Tune __t)
00139     { 
00140       if (!_M_init)
00141     _M_options = __t;
00142     }
00143 
00144     bool
00145     _M_check_threshold(size_t __bytes)
00146     { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }
00147 
00148     size_t
00149     _M_get_binmap(size_t __bytes)
00150     { return _M_binmap[__bytes]; }
00151 
00152     const size_t
00153     _M_get_align()
00154     { return _M_options._M_align; }
00155 
00156     explicit 
00157     __pool_base() 
00158     : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }
00159 
00160     explicit 
00161     __pool_base(const _Tune& __options)
00162     : _M_options(__options), _M_binmap(NULL), _M_init(false) { }
00163 
00164   private:
00165     explicit 
00166     __pool_base(const __pool_base&);
00167 
00168     __pool_base&
00169     operator=(const __pool_base&);
00170 
00171   protected:
00172     // Configuration options.
00173     _Tune               _M_options;
00174     
00175     _Binmap_type*       _M_binmap;
00176 
00177     // Configuration of the pool object via _M_options can happen
00178     // after construction but before initialization. After
00179     // initialization is complete, this variable is set to true.
00180     bool            _M_init;
00181   };
00182 
00183 
  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   *
   *  Only the explicit specializations below (single-threaded, and
   *  thread-enabled under __GTHREADS) are ever defined; the primary
   *  template is left undefined.
   */
  template<bool _Thread>
    class __pool;
00190 
  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      // A free block: while on the freelist only the link is needed,
      // hence a single-member union.
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* volatile         _M_next;
      };

      // Per-size-class bookkeeping.
      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record** volatile        _M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*             _M_address;
      };

      // Lazy one-time setup; cheap no-op once _M_init is true.
      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      // Release all memory owned by the pool (defined out of line).
      void
      _M_destroy() throw();

      // Obtain a block of at least __bytes when the freelist is
      // empty (defined out of line).
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return the block at __p to the pool (defined out of line).
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      // Single-threaded: all storage belongs to "thread" 0.
      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // No per-thread freelist accounting in the single-thread pool.
      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile	_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      void
      _M_initialize();
  };
00256  
#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record* volatile        _M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t                          _M_id;
      };

      // A block is either on a freelist (link member live) or handed
      // out (owner thread id live) -- never both, hence the union.
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* volatile         _M_next;

	// The thread id of the thread which has requested this block.
	size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record** volatile        _M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*             _M_address;

	// An "array" of counters used to keep track of the amount of
	// blocks that are on the freelist/used for each thread id.
	// Memory to these "arrays" is allocated in _S_initialize() for
	// _S_max_threads + global pool 0.
	size_t* volatile                _M_free;
	size_t* volatile                _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      // Retained for binary compatibility; current code paths go
      // through the nullary _M_initialize() below.
      void
      _M_initialize(__destroy_handler);

      // Lazy one-time setup; cheap no-op once _M_init is true.
      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      // Release all memory owned by the pool (defined out of line).
      void
      _M_destroy() throw();

      // Obtain a block of at least __bytes for __thread_id when its
      // freelist is empty (defined out of line).
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return the block at __p to the pool (defined out of line).
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // Record that __block now belongs to __thread_id and update the
      // free/used counters; only needed when threads are active.
      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      // Former __gthread_key destructor; retained for binary
      // compatibility.
      void
      _M_destroy_thread_key(void*);

      // Id of the calling thread; ids are handed out from the free
      // list on first use (defined out of line).
      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile	_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      // Head of the free thread-id list and the storage backing it.
      _Thread_record*		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif
00381 
00382   template<template <bool> class _PoolTp, bool _Thread>
00383     struct __common_pool
00384     {
00385       typedef _PoolTp<_Thread>      pool_type;
00386       
00387       static pool_type&
00388       _S_get_pool()
00389       { 
00390     static pool_type _S_pool;
00391     return _S_pool;
00392       }
00393     };
00394 
  // Primary template; specialized below for single-threaded and
  // (under __GTHREADS) threaded operation.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;
00397 
00398   template<template <bool> class _PoolTp>
00399     struct __common_pool_base<_PoolTp, false> 
00400     : public __common_pool<_PoolTp, false>
00401     {
00402       using  __common_pool<_PoolTp, false>::_S_get_pool;
00403 
00404       static void
00405       _S_initialize_once()
00406       {
00407     static bool __init;
00408     if (__builtin_expect(__init == false, false))
00409       {
00410         _S_get_pool()._M_initialize_once(); 
00411         __init = true;
00412       }
00413       }
00414     };
00415 
#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      // Target handed to __gthread_once below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // One-time initialization of the shared pool: via
      // __gthread_once when threads are active, directly otherwise.
      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif
00449 
  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      // Rebind to another value type: the pool template and threading
      // model carry over by default, so rebound allocators keep
      // sharing the same pool instance.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };
00462  
00463 
00464   template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
00465     struct __per_type_pool
00466     {
00467       typedef _Tp           value_type;
00468       typedef _PoolTp<_Thread>      pool_type;
00469       
00470       static pool_type&
00471       _S_get_pool()
00472       { 
00473     // Sane defaults for the _PoolTp.
00474     typedef typename pool_type::_Block_record _Block_record;
00475     const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
00476                    ? __alignof__(_Tp) : sizeof(_Block_record));
00477 
00478     typedef typename __pool_base::_Tune _Tune;
00479     static _Tune _S_tune(__a, sizeof(_Tp) * 64,
00480                  sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
00481                  sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
00482                  _Tune::_S_max_threads,
00483                  _Tune::_S_freelist_headroom,
00484                  getenv("GLIBCXX_FORCE_NEW") ? true : false);
00485     static pool_type _S_pool(_S_tune);
00486     return _S_pool;
00487       }
00488     };
00489 
  // Primary template; specialized below for single-threaded and
  // (under __GTHREADS) threaded operation.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;
00492 
00493   template<typename _Tp, template <bool> class _PoolTp>
00494     struct __per_type_pool_base<_Tp, _PoolTp, false> 
00495     : public __per_type_pool<_Tp, _PoolTp, false> 
00496     {
00497       using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;
00498 
00499       static void
00500       _S_initialize_once()
00501       {
00502     static bool __init;
00503     if (__builtin_expect(__init == false, false))
00504       {
00505         _S_get_pool()._M_initialize_once(); 
00506         __init = true;
00507       }
00508       }
00509     };
00510 
#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      // Target handed to __gthread_once below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // One-time initialization of the per-type pool: via
      // __gthread_once when threads are active, directly otherwise.
      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif
00544 
  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      // Rebinding changes _Tp and therefore selects a different
      // per-type pool instance.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };
00558 
00559 
00560   /// @brief  Base class for _Tp dependent member functions.
00561   template<typename _Tp>
00562     class __mt_alloc_base 
00563     {
00564     public:
00565       typedef size_t                    size_type;
00566       typedef ptrdiff_t                 difference_type;
00567       typedef _Tp*                      pointer;
00568       typedef const _Tp*                const_pointer;
00569       typedef _Tp&                      reference;
00570       typedef const _Tp&                const_reference;
00571       typedef _Tp                       value_type;
00572 
00573       pointer
00574       address(reference __x) const
00575       { return &__x; }
00576 
00577       const_pointer
00578       address(const_reference __x) const
00579       { return &__x; }
00580 
00581       size_type
00582       max_size() const throw() 
00583       { return size_t(-1) / sizeof(_Tp); }
00584 
00585       // _GLIBCXX_RESOLVE_LIB_DEFECTS
00586       // 402. wrong new expression in [some_] allocator::construct
00587       void 
00588       construct(pointer __p, const _Tp& __val) 
00589       { ::new(__p) _Tp(__val); }
00590 
00591       void 
00592       destroy(pointer __p) { __p->~_Tp(); }
00593     };
00594 
// Select the pool specialization used by default: thread-enabled when
// gthreads is available, single-threaded otherwise.  The macro is
// #undef'd at the end of this header.
#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif
00600 
  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                        size_type;
      typedef ptrdiff_t                     difference_type;
      typedef _Tp*                          pointer;
      typedef const _Tp*                    const_pointer;
      typedef _Tp&                          reference;
      typedef const _Tp&                    const_reference;
      typedef _Tp                           value_type;
      // The policy supplies the pool type plus its lookup and
      // one-time initialization.
      typedef _Poolp      		    __policy_type;
      typedef typename _Poolp::pool_type    __pool_type;

      // Standard allocator rebind, carrying the policy along via the
      // policy's own _M_rebind.
      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      // The allocator itself is stateless; construction, copying and
      // destruction are no-ops.
      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      // Forwarded to the pool; only effective before the pool has
      // been initialized (see __pool_base::_M_set_options).
      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
00659 
00660   template<typename _Tp, typename _Poolp>
00661     typename __mt_alloc<_Tp, _Poolp>::pointer
00662     __mt_alloc<_Tp, _Poolp>::
00663     allocate(size_type __n, const void*)
00664     {
00665       if (__builtin_expect(__n > this->max_size(), false))
00666     std::__throw_bad_alloc();
00667 
00668       __policy_type::_S_initialize_once();
00669 
00670       // Requests larger than _M_max_bytes are handled by operator
00671       // new/delete directly.
00672       __pool_type& __pool = __policy_type::_S_get_pool();
00673       const size_t __bytes = __n * sizeof(_Tp);
00674       if (__pool._M_check_threshold(__bytes))
00675     {
00676       void* __ret = ::operator new(__bytes);
00677       return static_cast<_Tp*>(__ret);
00678     }
00679       
00680       // Round up to power of 2 and figure out which bin to use.
00681       const size_t __which = __pool._M_get_binmap(__bytes);
00682       const size_t __thread_id = __pool._M_get_thread_id();
00683       
00684       // Find out if we have blocks on our freelist.  If so, go ahead
00685       // and use them directly without having to lock anything.
00686       char* __c;
00687       typedef typename __pool_type::_Bin_record _Bin_record;
00688       const _Bin_record& __bin = __pool._M_get_bin(__which);
00689       if (__bin._M_first[__thread_id])
00690     {
00691       // Already reserved.
00692       typedef typename __pool_type::_Block_record _Block_record;
00693       _Block_record* __block = __bin._M_first[__thread_id];
00694       __bin._M_first[__thread_id] = __block->_M_next;
00695       
00696       __pool._M_adjust_freelist(__bin, __block, __thread_id);
00697       __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
00698     }
00699       else
00700     {
00701       // Null, reserve.
00702       __c = __pool._M_reserve_block(__bytes, __thread_id);
00703     }
00704       return static_cast<_Tp*>(static_cast<void*>(__c));
00705     }
00706   
00707   template<typename _Tp, typename _Poolp>
00708     void
00709     __mt_alloc<_Tp, _Poolp>::
00710     deallocate(pointer __p, size_type __n)
00711     {
00712       if (__builtin_expect(__p != 0, true))
00713     {
00714       // Requests larger than _M_max_bytes are handled by
00715       // operators new/delete directly.
00716       __pool_type& __pool = __policy_type::_S_get_pool();
00717       const size_t __bytes = __n * sizeof(_Tp);
00718       if (__pool._M_check_threshold(__bytes))
00719         ::operator delete(__p);
00720       else
00721         __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
00722     }
00723     }
00724   
  // All __mt_alloc objects with the same template arguments share the
  // same pool, so any two such allocators compare equal (storage
  // allocated by one can be freed by the other).
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }
00729   
  // Complement of operator==: equally-parameterized __mt_alloc
  // objects are never unequal.
  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
00734 
00735 #undef __thread_default
00736 } // namespace __gnu_cxx
00737 
00738 #endif

Generated on Thu Nov 1 17:35:59 2007 for libstdc++ by doxygen 1.5.1