OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk

File: openrisc/trunk/gnu-stable/gcc-4.5.1/libstdc++-v3/src/mt_allocator.cc (rev 826)
// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
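
    // Layout note: the chunk allocated below starts with a _Block_address
    // header, followed by __block_count blocks of __bin_size bytes each;
    // the pointer handed back to the caller is the block address offset
    // by _M_align (see the comment before the return statement).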

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
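
    // For example, with _M_min_bin == 8 the loop above produces:
    //   _M_binmap[0..8]   == 0   (requests up to 8 bytes use bin 0)
    //   _M_binmap[9..16]  == 1
    //   _M_binmap[17..32] == 2, and so on up to _M_max_bytes.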

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
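        // __bin._M_used points at __max_threads size_t counters that are
        // immediately followed, in the same allocation, by __max_threads
        // _Atomic_word "reclaimed" counters (see _M_initialize); the
        // pointer arithmetic below addresses that trailing array.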
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0:
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if there are none, add new
    //   ones) and get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
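        // _M_used[__thread_id] is decremented directly only by this
        // thread; blocks returned by other threads are tallied in the
        // owner's "reclaimed" _Atomic_word counter instead, and the
        // difference is folded back in here.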
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
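                  // The array grew: rebuild the existing free list in
                  // the new array, preserving its order by record id,
                  // and link its last record to the first of the newly
                  // added records (index freelist._M_max_threads, the
                  // old array size).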
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not yet set, take the first
    // record from _M_thread_freelist, set the key, and return the
    // record's id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
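
Usage note (not part of the file above): __mt_alloc is the user-facing
allocator defined in <ext/mt_allocator.h> and backed by the __pool
machinery implemented here. A minimal sketch of how a program might
select it for a standard container follows; the container contents and
names are illustrative only.

    // Compile with a GCC toolchain; <ext/mt_allocator.h> is a
    // libstdc++ extension header.
    #include <ext/mt_allocator.h>
    #include <vector>

    int main()
    {
      // Requests up to _M_max_bytes are served from the pool's size
      // bins; larger requests fall back to operator new.
      std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
      for (int i = 0; i < 1000; ++i)
        v.push_back(i);
      return 0;
    }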
