mt_allocator.h

Go to the documentation of this file.
// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      // All instances share the static pool state below, so the
      // constructors have nothing to do.
      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      // Allocate raw storage for __n objects of type _Tp; defined
      // out-of-line below.  The hint parameter is ignored.
      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. A raw new/
        // call will be used for requests larger than this value.
        size_t  _M_max_bytes;

        // Size in bytes of the smallest bin (must be a power of 2).
        size_t  _M_min_bin;

        // In order to avoid fragmenting and minimize the number of
        // new() calls we always request new memory using this
        // value. Based on previous discussions on the libstdc++
        // mailing list we have choosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t  _M_chunk_size;

        // The maximum number of supported threads. Our Linux 2.4.18
        // reports 4070 in /proc/sys/kernel/threads-max
        size_t  _M_max_threads;

        // Each time a deallocation occurs in a threaded application
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t  _M_freelist_headroom;

        // Set to true forces all allocations to use new().
        bool    _M_force_new;

        // Defaults; _M_force_new may be switched on at runtime via
        // the GLIBCXX_FORCE_NEW environment variable.
        explicit
        _Tune()
        : _M_max_bytes(128), _M_min_bin(8),
          _M_chunk_size(4096 - 4 * sizeof(void*)),
          _M_max_threads(4096), _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit
        _Tune(size_t __maxb, size_t __minbin, size_t __chunk,
              size_t __maxthreads, size_t __headroom, bool __force)
        : _M_max_bytes(__maxb), _M_min_bin(__minbin), _M_chunk_size(__chunk),
          _M_max_threads(__maxthreads), _M_freelist_headroom(__headroom),
          _M_force_new(__force)
        { }
      };

    private:
      // We need to create the initial lists and set up some variables
      // before we can answer to the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t           _S_once;
#endif
      static bool                       _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune                      _S_options;

      static const _Tune
      _S_get_options()
      { return _S_options; }

      // Options may only be changed before the pool is initialized;
      // afterwards the request is silently ignored.
      static void
      _S_set_options(_Tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }

      // Using short int as type for the binmap implies we are never
      // caching blocks larger than 65535 with this allocator
      typedef unsigned short int        _Binmap_type;
      static _Binmap_type*              _S_binmap;

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile        _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                          _M_id;
      };

      static _Thread_record* volatile   _S_thread_freelist_first;
      static __gthread_mutex_t          _S_thread_freelist_mutex;
      static __gthread_key_t            _S_thread_key;

      // Thread-key destructor: returns the id record to the freelist.
      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      // Header placed in front of every cached block.  While a block
      // is free, _M_next links it into a freelist; while it is in
      // use (threaded builds) _M_thread_id records the owner.
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile         _M_next;

#ifdef __GTHREADS
        // The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
#endif
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in _S_initialize()
        // for _S_max_threads + global pool 0.
        _Block_record** volatile        _M_first;

#ifdef __GTHREADS
        // An "array" of counters used to keep track of the amount of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile                _M_free;
        size_t* volatile                _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _S_initialize().
      static _Bin_record* volatile      _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t                     _S_bin_size;
    };

  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // test of the once condition in our own unlocked check. This
      // saves one function call to pthread_once() (which itself only
      // tests for the once value unlocked anyway and immediately
      // returns if set)
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          // Covers single-threaded builds/runs, where __gthread_once
          // was not (or could not be) used.
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      // __bytes <= _M_max_bytes here, so the binmap lookup (sized
      // _M_max_bytes + 1 in _S_initialize) is always in range.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      const _Bin_record& __bin = _S_bin[__which];
      _Block_record* __block = NULL;
      if (__bin._M_first[__thread_id] == NULL)
        {
          // Each cached block is __bin_size bytes: the user-visible
          // power-of-2 size plus the _Block_record header.
          const size_t __bin_size = ((_S_options._M_min_bin << __which)
                                     + sizeof(_Block_record));
          size_t __block_count = _S_options._M_chunk_size / __bin_size;

          // Are we using threads?
          // - Yes, check if there are free blocks on the global
          //   list. If so, grab up to __block_count blocks in one
          //   lock and change ownership. If the global list is
          //   empty, we allocate a new chunk and add those blocks
          //   directly to our own freelist (with us as owner).
          // - No, all operations are made directly to global pool 0
          //   no need to lock or change ownership but check for free
          //   blocks on global list (and if not add new ones) and
          //   get the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __gthread_mutex_lock(__bin._M_mutex);
              if (__bin._M_first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin._M_mutex);

                  // Carve the fresh chunk into __block_count blocks
                  // linked through their _M_next headers.
                  void* __v = ::operator new(_S_options._M_chunk_size);
                  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
                  __bin._M_free[__thread_id] = __block_count;

                  --__block_count;
                  __block = __bin._M_first[__thread_id];
                  while (__block_count-- > 0)
                    {
                      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                      __block = __block->_M_next;
                    }
                  __block->_M_next = NULL;
                }
              else
                {
                  // Is the number of required blocks greater than or
                  // equal to the number that can be provided by the
                  // global free list?
                  __bin._M_first[__thread_id] = __bin._M_first[0];
                  if (__block_count >= __bin._M_free[0])
                    {
                      // Take everything the global pool has.
                      __bin._M_free[__thread_id] = __bin._M_free[0];
                      __bin._M_free[0] = 0;
                      __bin._M_first[0] = NULL;
                    }
                  else
                    {
                      // Take only __block_count blocks: walk to the
                      // last one we keep and cut the list there.
                      __bin._M_free[__thread_id] = __block_count;
                      __bin._M_free[0] -= __block_count;
                      --__block_count;
                      __block = __bin._M_first[0];
                      while (__block_count-- > 0)
                        __block = __block->_M_next;
                      __bin._M_first[0] = __block->_M_next;
                      __block->_M_next = NULL;
                    }
                  __gthread_mutex_unlock(__bin._M_mutex);
                }
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin._M_first[0] = static_cast<_Block_record*>(__v);

              --__block_count;
              __block = __bin._M_first[0];
              while (__block_count-- > 0)
                {
                  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                  __block = __block->_M_next;
                }
              __block->_M_next = NULL;
            }
        }

      // Pop the first block off our freelist and hand it out.
      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          __block->_M_thread_id = __thread_id;
          --__bin._M_free[__thread_id];
          ++__bin._M_used[__thread_id];
        }
#endif

      // The user's storage starts just past the block header.
      char* __c = reinterpret_cast<char*>(__block) + sizeof(_Block_record);
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      // Recover the block header, which allocate() placed
      // sizeof(_Block_record) bytes before the returned pointer.
      char* __c = reinterpret_cast<char*>(__p) - sizeof(_Block_record);
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist:
          // in order to avoid too much contention we wait until the
          // number of records is "high enough".
          const size_t __thread_id = _S_get_thread_id();

          // NOTE(review): the headroom formula below scales _M_free by
          // _M_freelist_headroom rather than comparing percentages
          // directly; it is kept verbatim from the shipped algorithm.
          long __remove = ((__bin._M_free[__thread_id]
                            * _S_options._M_freelist_headroom)
                           - __bin._M_used[__thread_id]);
          if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
                                           * _S_options._M_freelist_headroom)
              && __remove > static_cast<long>(__bin._M_free[__thread_id]))
            {
              // Unhook __removed blocks from the front of our list...
              _Block_record* __tmp = __bin._M_first[__thread_id];
              _Block_record* __first = __tmp;
              __remove /= _S_options._M_freelist_headroom;
              const long __removed = __remove;
              --__remove;
              while (__remove-- > 0)
                __tmp = __tmp->_M_next;
              __bin._M_first[__thread_id] = __tmp->_M_next;
              __bin._M_free[__thread_id] -= __removed;

              // ...and splice them onto the global pool under lock.
              __gthread_mutex_lock(__bin._M_mutex);
              __tmp->_M_next = __bin._M_first[0];
              __bin._M_first[0] = __first;
              __bin._M_free[0] += __removed;
              __gthread_mutex_unlock(__bin._M_mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.  Note: _M_used is decremented for the
          // block's original owner, which may be a different thread.
          --__bin._M_used[__block->_M_thread_id];

          __block->_M_next = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block;

          ++__bin._M_free[__thread_id];
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          __block->_M_next = __bin._M_first[0];
          __bin._M_first[0] = __block;
        }
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // When force_new is on, allocate()/deallocate() never touch the
      // pool structures, so there is nothing to set up.
      if (_S_options._M_force_new)
        return;

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size <<= 1;
          ++_S_bin_size;
        }

      // Setup the bin map for quick lookup of the relevant bin.
      // _S_binmap[__bytes] yields the bin index for any request of
      // __bytes in [0, _M_max_bytes].
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p() create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
              _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

          // Per bin: one freelist head and one free/used counter per
          // thread id (ids 1.._M_max_threads plus global pool 0).
          const size_t __max_threads = _S_options._M_max_threads + 1;
          for (size_t __n = 0; __n < _S_bin_size; ++__n)
            {
              _Bin_record& __bin = _S_bin[__n];
              __v = ::operator new(sizeof(_Block_record*) * __max_threads);
              __bin._M_first = static_cast<_Block_record**>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

              for (size_t __threadn = 0; __threadn < __max_threads;
                   ++__threadn)
                {
                  __bin._M_first[__threadn] = NULL;
                  __bin._M_free[__threadn] = 0;
                  __bin._M_used[__threadn] = 0;
                }
            }
        }
      else
#endif
        // Single-threaded: only the global pool 0 head is needed.
        for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
            _Bin_record& __bin = _S_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
          }

      _S_init = true;
    }

  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active we check the thread
      // key value and return its id or if it's not set we take the
      // first record from _S_thread_freelist and sets the key and
      // returns it's id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
            static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  // All instances share the same static pools, so any two allocators
  // compare equal.
  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  // Definitions of the static members declared in __mt_alloc.
  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif

Generated on Wed Jun 9 11:18:41 2004 for libstdc++-v3 Source by doxygen 1.3.7