/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.
 
   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
 
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
 
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
 
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif
 
#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif
 
#ifndef USING_MMAP
#error Zone collector requires mmap
#endif
 
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
 
/* FUTURE NOTES:
 
   If we track inter-zone pointers, we can mark single zones at a
   time.
 
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
 
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
 
/* Strategy:
 
   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.
 
   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.
 
   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
 
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
 
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
 
/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */
 
struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};
 
/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))
 
/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.
 
   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE   4096
#define GGC_PAGE_MASK   (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT  12
 
#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE   G.pagesize
#define GGC_PAGE_MASK   G.page_mask
#define GGC_PAGE_SHIFT  G.lg_pagesize
#endif
 
/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE
 
/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate... */
 
/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */
 
#define NUM_FREE_BINS           64
#define FREE_BIN_DELTA          MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)     ((SIZE) / FREE_BIN_DELTA)
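/* Illustrative note (an addition, not in the original source): free chunk
   sizes are always multiples of FREE_BIN_DELTA, so assuming MAX_ALIGNMENT
   (and hence FREE_BIN_DELTA) is 8, bins 1..64 hold fixed-size chunks of
   8..512 bytes -- e.g. SIZE_BIN_DOWN (24) == 3 -- and anything larger is
   kept in the variable-sized "other" bin 0.  */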
 
/* Allocation and marking parameters.  */
 
/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT     MAX_ALIGNMENT
 
/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT      BYTES_PER_ALLOC_BIT
 
#define BYTES_PER_MARK_WORD     (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
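/* Worked example (an addition, not in the original source): assuming
   MAX_ALIGNMENT == 8 and a 32-bit mark_type, one mark word covers
   8 * 8 * 4 == 256 bytes, so a 4096-byte page needs 16 mark words;
   the allocation bitmap likewise needs 4096 / 8 == 512 bits per page.  */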
 
/* We use this structure to determine the alignment required for
   allocations.
 
   There are several things wrong with this estimation of alignment.
 
   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).
 
   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */
 
struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};
 
/* The biggest alignment required.  */
 
#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
 
/* Compute the smallest multiple of F that is >= X.  */
 
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
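/* Illustrative note (an addition, not in the original source): MAX_ALIGNMENT
   is the offset of the U member of struct max_alignment above, i.e. the
   strictest alignment of HOST_WIDEST_INT and double on the host, typically 8.
   Under that assumption, ROUND_UP (37, MAX_ALIGNMENT) == CEIL (37, 8) * 8 == 40.  */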
 
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */
 
typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
233
 
234
/* A page_entry records the status of an allocation page.  This is the
235
   common data between all three kinds of pages - small, large, and
236
   PCH.  */
237
typedef struct page_entry
238
{
239
  /* The address at which the memory is allocated.  */
240
  char *page;
241
 
242
  /* The zone that this page entry belongs to.  */
243
  struct alloc_zone *zone;
244
 
245
#ifdef GATHER_STATISTICS
246
  /* How many collections we've survived.  */
247
  size_t survived;
248
#endif
249
 
250
  /* Does this page contain small objects, or one large object?  */
251
  bool large_p;
252
 
253
  /* Is this page part of the loaded PCH?  */
254
  bool pch_p;
255
} page_entry;
256
 
257
/* Additional data needed for small pages.  */
258
struct small_page_entry
259
{
260
  struct page_entry common;
261
 
262
  /* The next small page entry, or NULL if this is the last.  */
263
  struct small_page_entry *next;
264
 
265
  /* If currently marking this zone, a pointer to the mark bits
266
     for this page.  If we aren't currently marking this zone,
267
     this pointer may be stale (pointing to freed memory).  */
268
  mark_type *mark_bits;
269
 
270
  /* The allocation bitmap.  This array extends far enough to have
271
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
272
  alloc_type alloc_bits[1];
273
};
274
 
275
/* Additional data needed for large pages.  */
276
struct large_page_entry
277
{
278
  struct page_entry common;
279
 
280
  /* The next large page entry, or NULL if this is the last.  */
281
  struct large_page_entry *next;
282
 
283
  /* The number of bytes allocated, not including the page entry.  */
284
  size_t bytes;
285
 
286
  /* The previous page in the list, so that we can unlink this one.  */
287
  struct large_page_entry *prev;
288
 
289
  /* During marking, is this object marked?  */
290
  bool mark_p;
291
};
292
 
293
/* A two-level tree is used to look up the page-entry for a given
294
   pointer.  Two chunks of the pointer's bits are extracted to index
295
   the first and second levels of the tree, as follows:
296
 
                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS
 
305
   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
306
   pages are aligned on system page boundaries.  The next most
307
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
308
   index values in the lookup table, respectively.
309
 
310
   For 32-bit architectures and the settings below, there are no
311
   leftover bits.  For architectures with wider pointers, the lookup
312
   tree points to a list of pages, which must be scanned to find the
313
   correct one.  */
314
 
315
#define PAGE_L1_BITS    (8)
316
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
317
#define PAGE_L1_SIZE    ((size_t) 1 << PAGE_L1_BITS)
318
#define PAGE_L2_SIZE    ((size_t) 1 << PAGE_L2_BITS)
319
 
320
#define LOOKUP_L1(p) \
321
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
322
 
323
#define LOOKUP_L2(p) \
324
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
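/* Worked example (an addition, not in the original source): on a 32-bit
   host with the defaults above (GGC_PAGE_SHIFT == 12, PAGE_L1_BITS == 8,
   hence PAGE_L2_BITS == 12), a pointer such as 0xdeadb123 gives
   LOOKUP_L1 == 0xde and LOOKUP_L2 == 0xadb, so its page entry lives at
   base[0xde][0xadb]; the low 12 bits are the offset within the page.  */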
325
 
326
#if HOST_BITS_PER_PTR <= 32
327
 
328
/* On 32-bit hosts, we use a two level page table, as pictured above.  */
329
typedef page_entry **page_table[PAGE_L1_SIZE];
330
 
331
#else
332
 
333
/* On 64-bit hosts, we use the same two level page tables plus a linked
334
   list that disambiguates the top 32-bits.  There will almost always be
335
   exactly one entry in the list.  */
336
typedef struct page_table_chain
337
{
338
  struct page_table_chain *next;
339
  size_t high_bits;
340
  page_entry **table[PAGE_L1_SIZE];
341
} *page_table;
342
 
343
#endif
344
 
345
/* The global variables.  */
346
static struct globals
347
{
348
  /* The linked list of zones.  */
349
  struct alloc_zone *zones;
350
 
351
  /* Lookup table for associating allocation pages with object addresses.  */
352
  page_table lookup;
353
 
354
  /* The system's page size, and related constants.  */
355
  size_t pagesize;
356
  size_t lg_pagesize;
357
  size_t page_mask;
358
 
359
  /* The size to allocate for a small page entry.  This includes
360
     the size of the structure and the size of the allocation
361
     bitmap.  */
362
  size_t small_page_overhead;
363
 
364
#if defined (HAVE_MMAP_DEV_ZERO)
365
  /* A file descriptor open to /dev/zero for reading.  */
366
  int dev_zero_fd;
367
#endif
368
 
369
  /* Allocate pages in chunks of this size, to throttle calls to memory
370
     allocation routines.  The first page is used, the rest go onto the
371
     free list.  */
372
  size_t quire_size;
373
 
374
  /* The file descriptor for debugging output.  */
375
  FILE *debug_file;
376
} G;
377
 
378
/* A zone allocation structure.  There is one of these for every
379
   distinct allocation zone.  */
380
struct alloc_zone
381
{
382
  /* The most recent free chunk is saved here, instead of in the linked
383
     free list, to decrease list manipulation.  It is most likely that we
384
     will want this one.  */
385
  char *cached_free;
386
  size_t cached_free_size;
387
 
388
  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
389
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
390
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
391
 
392
  /* The highest bin index which might be non-empty.  It may turn out
393
     to be empty, in which case we have to search downwards.  */
394
  size_t high_free_bin;
395
 
396
  /* Bytes currently allocated in this zone.  */
397
  size_t allocated;
398
 
399
  /* Linked list of the small pages in this zone.  */
400
  struct small_page_entry *pages;
401
 
402
  /* Doubly linked list of large pages in this zone.  */
403
  struct large_page_entry *large_pages;
404
 
405
  /* If we are currently marking this zone, a pointer to the mark bits.  */
406
  mark_type *mark_bits;
407
 
408
  /* Name of the zone.  */
409
  const char *name;
410
 
411
  /* The number of small pages currently allocated in this zone.  */
412
  size_t n_small_pages;
413
 
414
  /* Bytes allocated at the end of the last collection.  */
415
  size_t allocated_last_gc;
416
 
417
  /* Total amount of memory mapped.  */
418
  size_t bytes_mapped;
419
 
420
  /* A cache of free system pages.  */
421
  struct small_page_entry *free_pages;
422
 
423
  /* Next zone in the linked list of zones.  */
424
  struct alloc_zone *next_zone;
425
 
426
  /* True if this zone was collected during this collection.  */
427
  bool was_collected;
428
 
429
  /* True if this zone should be destroyed after the next collection.  */
430
  bool dead;
431
 
432
#ifdef GATHER_STATISTICS
433
  struct
434
  {
435
    /* Total memory allocated with ggc_alloc.  */
436
    unsigned long long total_allocated;
437
    /* Total overhead for memory to be allocated with ggc_alloc.  */
438
    unsigned long long total_overhead;
439
 
440
    /* Total allocations and overhead for sizes less than 32, 64 and 128.
441
       These sizes are interesting because they are typical cache line
442
       sizes.  */
443
 
444
    unsigned long long total_allocated_under32;
445
    unsigned long long total_overhead_under32;
446
 
447
    unsigned long long total_allocated_under64;
448
    unsigned long long total_overhead_under64;
449
 
450
    unsigned long long total_allocated_under128;
451
    unsigned long long total_overhead_under128;
452
  } stats;
453
#endif
454
} main_zone;
455
 
456
/* Some default zones.  */
457
struct alloc_zone rtl_zone;
458
struct alloc_zone tree_zone;
459
struct alloc_zone tree_id_zone;
460
 
461
/* The PCH zone does not need a normal zone structure, and it does
462
   not live on the linked list of zones.  */
463
struct pch_zone
464
{
465
  /* The start of the PCH zone.  NULL if there is none.  */
466
  char *page;
467
 
468
  /* The end of the PCH zone.  NULL if there is none.  */
469
  char *end;
470
 
471
  /* The size of the PCH zone.  0 if there is none.  */
472
  size_t bytes;
473
 
474
  /* The allocation bitmap for the PCH zone.  */
475
  alloc_type *alloc_bits;
476
 
477
  /* If we are currently marking, the mark bitmap for the PCH zone.
478
     When it is first read in, we could avoid marking the PCH,
479
     because it will not contain any pointers to GC memory outside
480
     of the PCH; however, the PCH is currently mapped as writable,
481
     so we must mark it in case new pointers are added.  */
482
  mark_type *mark_bits;
483
} pch_zone;
484
 
485
#ifdef USING_MMAP
486
static char *alloc_anon (char *, size_t, struct alloc_zone *);
487
#endif
488
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
489
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
490
static void free_chunk (char *, size_t, struct alloc_zone *);
491
static void free_small_page (struct small_page_entry *);
492
static void free_large_page (struct large_page_entry *);
493
static void release_pages (struct alloc_zone *);
494
static void sweep_pages (struct alloc_zone *);
495
static bool ggc_collect_1 (struct alloc_zone *, bool);
496
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
497
 
498
/* Traverse the page table and find the entry for a page.
499
   Die (probably) if the object wasn't allocated via GC.  */
500
 
501
static inline page_entry *
502
lookup_page_table_entry (const void *p)
503
{
504
  page_entry ***base;
505
  size_t L1, L2;
506
 
507
#if HOST_BITS_PER_PTR <= 32
508
  base = &G.lookup[0];
509
#else
510
  page_table table = G.lookup;
511
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
512
  while (table->high_bits != high_bits)
513
    table = table->next;
514
  base = &table->table[0];
515
#endif
516
 
517
  /* Extract the level 1 and 2 indices.  */
518
  L1 = LOOKUP_L1 (p);
519
  L2 = LOOKUP_L2 (p);
520
 
521
  return base[L1][L2];
522
}
523
 
524
/* Set the page table entry for the page that starts at P.  If ENTRY
525
   is NULL, clear the entry.  */
526
 
527
static void
528
set_page_table_entry (void *p, page_entry *entry)
529
{
530
  page_entry ***base;
531
  size_t L1, L2;
532
 
533
#if HOST_BITS_PER_PTR <= 32
534
  base = &G.lookup[0];
535
#else
536
  page_table table;
537
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
538
  for (table = G.lookup; table; table = table->next)
539
    if (table->high_bits == high_bits)
540
      goto found;
541
 
542
  /* Not found -- allocate a new table.  */
543
  table = xcalloc (1, sizeof(*table));
544
  table->next = G.lookup;
545
  table->high_bits = high_bits;
546
  G.lookup = table;
547
found:
548
  base = &table->table[0];
549
#endif
550
 
551
  /* Extract the level 1 and 2 indices.  */
552
  L1 = LOOKUP_L1 (p);
553
  L2 = LOOKUP_L2 (p);
554
 
555
  if (base[L1] == NULL)
556
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
557
 
558
  base[L1][L2] = entry;
559
}
560
 
561
/* Find the page table entry associated with OBJECT.  */
562
 
563
static inline struct page_entry *
564
zone_get_object_page (const void *object)
565
{
566
  return lookup_page_table_entry (object);
567
}
568
 
569
/* Find which element of the alloc_bits array OBJECT should be
570
   recorded in.  */
571
static inline unsigned int
572
zone_get_object_alloc_word (const void *object)
573
{
574
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
575
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
576
}
577
 
578
/* Find which bit of the appropriate word in the alloc_bits array
579
   OBJECT should be recorded in.  */
580
static inline unsigned int
581
zone_get_object_alloc_bit (const void *object)
582
{
583
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
584
          % (8 * sizeof (alloc_type)));
585
}
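/* Worked example (an addition, not in the original source): assuming
   MAX_ALIGNMENT == 8 and a 32-bit alloc_type, an object 296 bytes into
   its GGC_PAGE_SIZE-aligned page has alloc word 296 / 256 == 1 and
   alloc bit (296 / 8) % 32 == 5; the mark word and bit below are
   computed the same way.  */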
586
 
587
/* Find which element of the mark_bits array OBJECT should be recorded
588
   in.  */
589
static inline unsigned int
590
zone_get_object_mark_word (const void *object)
591
{
592
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
593
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
594
}
595
 
596
/* Find which bit of the appropriate word in the mark_bits array
597
   OBJECT should be recorded in.  */
598
static inline unsigned int
599
zone_get_object_mark_bit (const void *object)
600
{
601
  return (((size_t) object / BYTES_PER_MARK_BIT)
602
          % (8 * sizeof (mark_type)));
603
}
604
 
605
/* Set the allocation bit corresponding to OBJECT in its page's
606
   bitmap.  Used to split this object from the preceding one.  */
607
static inline void
608
zone_set_object_alloc_bit (const void *object)
609
{
610
  struct small_page_entry *page
611
    = (struct small_page_entry *) zone_get_object_page (object);
612
  unsigned int start_word = zone_get_object_alloc_word (object);
613
  unsigned int start_bit = zone_get_object_alloc_bit (object);
614
 
615
  page->alloc_bits[start_word] |= 1L << start_bit;
616
}
617
 
618
/* Clear the allocation bit corresponding to OBJECT in PAGE's
619
   bitmap.  Used to coalesce this object with the preceding
620
   one.  */
621
static inline void
622
zone_clear_object_alloc_bit (struct small_page_entry *page,
623
                             const void *object)
624
{
625
  unsigned int start_word = zone_get_object_alloc_word (object);
626
  unsigned int start_bit = zone_get_object_alloc_bit (object);
627
 
628
  /* Would xor be quicker?  */
629
  page->alloc_bits[start_word] &= ~(1L << start_bit);
630
}
631
 
632
/* Find the size of the object which starts at START_WORD and
633
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
634
   Helper function for ggc_get_size and zone_find_object_size.  */
635
 
636
static inline size_t
637
zone_object_size_1 (alloc_type *alloc_bits,
638
                    size_t start_word, size_t start_bit,
639
                    size_t max_size)
640
{
641
  size_t size;
642
  alloc_type alloc_word;
643
  int indx;
644
 
645
  /* Load the first word.  */
646
  alloc_word = alloc_bits[start_word++];
647
 
648
  /* If that was the last bit in this word, we'll want to continue
649
     with the next word.  Otherwise, handle the rest of this word.  */
650
  if (start_bit)
651
    {
652
      indx = alloc_ffs (alloc_word >> start_bit);
653
      if (indx)
654
        /* indx is 1-based.  We started at the bit after the object's
655
           start, but we also ended at the bit after the object's end.
656
           It cancels out.  */
657
        return indx * BYTES_PER_ALLOC_BIT;
658
 
659
      /* The extra 1 accounts for the starting unit, before start_bit.  */
660
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
661
 
662
      if (size >= max_size)
663
        return max_size;
664
 
665
      alloc_word = alloc_bits[start_word++];
666
    }
667
  else
668
    size = BYTES_PER_ALLOC_BIT;
669
 
670
  while (alloc_word == 0)
671
    {
672
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
673
      if (size >= max_size)
674
        return max_size;
675
      alloc_word = alloc_bits[start_word++];
676
    }
677
 
678
  indx = alloc_ffs (alloc_word);
679
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
680
}
681
 
682
/* Find the size of OBJECT on small page PAGE.  */
683
 
684
static inline size_t
685
zone_find_object_size (struct small_page_entry *page,
686
                       const void *object)
687
{
688
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
689
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
690
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
691
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
692
                     - (char *) object);
693
 
694
  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
695
                             max_size);
696
}
697
 
698
/* Allocate the mark bits for every zone, and set the pointers on each
699
   page.  */
700
static void
701
zone_allocate_marks (void)
702
{
703
  struct alloc_zone *zone;
704
 
705
  for (zone = G.zones; zone; zone = zone->next_zone)
706
    {
707
      struct small_page_entry *page;
708
      mark_type *cur_marks;
709
      size_t mark_words, mark_words_per_page;
710
#ifdef ENABLE_CHECKING
711
      size_t n = 0;
712
#endif
713
 
714
      mark_words_per_page
715
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
716
      mark_words = zone->n_small_pages * mark_words_per_page;
717
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
718
                                                   mark_words);
719
      cur_marks = zone->mark_bits;
720
      for (page = zone->pages; page; page = page->next)
721
        {
722
          page->mark_bits = cur_marks;
723
          cur_marks += mark_words_per_page;
724
#ifdef ENABLE_CHECKING
725
          n++;
726
#endif
727
        }
728
#ifdef ENABLE_CHECKING
729
      gcc_assert (n == zone->n_small_pages);
730
#endif
731
    }
732
 
733
  /* We don't collect the PCH zone, but we do have to mark it
734
     (for now).  */
735
  if (pch_zone.bytes)
736
    pch_zone.mark_bits
737
      = (mark_type *) xcalloc (sizeof (mark_type),
738
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
739
}
740
 
741
/* After marking and sweeping, release the memory used for mark bits.  */
742
static void
743
zone_free_marks (void)
744
{
745
  struct alloc_zone *zone;
746
 
747
  for (zone = G.zones; zone; zone = zone->next_zone)
748
    if (zone->mark_bits)
749
      {
750
        free (zone->mark_bits);
751
        zone->mark_bits = NULL;
752
      }
753
 
754
  if (pch_zone.bytes)
755
    {
756
      free (pch_zone.mark_bits);
757
      pch_zone.mark_bits = NULL;
758
    }
759
}
760
 
761
#ifdef USING_MMAP
762
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
763
   (if non-null).  The ifdef structure here is intended to cause a
764
   compile error unless exactly one of the HAVE_* is defined.  */
765
 
766
static inline char *
767
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
768
{
769
#ifdef HAVE_MMAP_ANON
770
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
771
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
772
#endif
773
#ifdef HAVE_MMAP_DEV_ZERO
774
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
775
                              MAP_PRIVATE, G.dev_zero_fd, 0);
776
#endif
777
 
778
  if (page == (char *) MAP_FAILED)
779
    {
780
      perror ("virtual memory exhausted");
781
      exit (FATAL_EXIT_CODE);
782
    }
783
 
784
  /* Remember that we allocated this memory.  */
785
  zone->bytes_mapped += size;
786
 
787
  /* Pretend we don't have access to the allocated pages.  We'll enable
788
     access to smaller pieces of the area in ggc_alloc.  Discard the
789
     handle to avoid handle leak.  */
790
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
791
 
792
  return page;
793
}
794
#endif
795
 
796
/* Allocate a new page for allocating small objects in ZONE, and
797
   return an entry for it.  */
798
 
799
static struct small_page_entry *
800
alloc_small_page (struct alloc_zone *zone)
801
{
802
  struct small_page_entry *entry;
803
 
804
  /* Check the list of free pages for one we can use.  */
805
  entry = zone->free_pages;
806
  if (entry != NULL)
807
    {
808
      /* Recycle the allocated memory from this page ...  */
809
      zone->free_pages = entry->next;
810
    }
811
  else
812
    {
813
      /* We want just one page.  Allocate a bunch of them and put the
814
         extras on the freelist.  (Can only do this optimization with
815
         mmap for backing store.)  */
816
      struct small_page_entry *e, *f = zone->free_pages;
817
      int i;
818
      char *page;
819
 
820
      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
821
 
822
      /* This loop counts down so that the chain will be in ascending
823
         memory order.  */
824
      for (i = G.quire_size - 1; i >= 1; i--)
825
        {
826
          e = xcalloc (1, G.small_page_overhead);
827
          e->common.page = page + (i << GGC_PAGE_SHIFT);
828
          e->common.zone = zone;
829
          e->next = f;
830
          f = e;
831
          set_page_table_entry (e->common.page, &e->common);
832
        }
833
 
834
      zone->free_pages = f;
835
 
836
      entry = xcalloc (1, G.small_page_overhead);
837
      entry->common.page = page;
838
      entry->common.zone = zone;
839
      set_page_table_entry (page, &entry->common);
840
    }
841
 
842
  zone->n_small_pages++;
843
 
844
  if (GGC_DEBUG_LEVEL >= 2)
845
    fprintf (G.debug_file,
846
             "Allocating %s page at %p, data %p-%p\n",
847
             entry->common.zone->name, (PTR) entry, entry->common.page,
848
             entry->common.page + SMALL_PAGE_SIZE - 1);
849
 
850
  return entry;
851
}
852
 
853
/* Allocate a large page of size SIZE in ZONE.  */
854
 
855
static struct large_page_entry *
856
alloc_large_page (size_t size, struct alloc_zone *zone)
857
{
858
  struct large_page_entry *entry;
859
  char *page;
860
  size_t needed_size;
861
 
862
  needed_size = size + sizeof (struct large_page_entry);
863
  page = xmalloc (needed_size);
864
 
865
  entry = (struct large_page_entry *) page;
866
 
867
  entry->next = NULL;
868
  entry->common.page = page + sizeof (struct large_page_entry);
869
  entry->common.large_p = true;
870
  entry->common.pch_p = false;
871
  entry->common.zone = zone;
872
#ifdef GATHER_STATISTICS
873
  entry->common.survived = 0;
874
#endif
875
  entry->mark_p = false;
876
  entry->bytes = size;
877
  entry->prev = NULL;
878
 
879
  set_page_table_entry (entry->common.page, &entry->common);
880
 
881
  if (GGC_DEBUG_LEVEL >= 2)
882
    fprintf (G.debug_file,
883
             "Allocating %s large page at %p, data %p-%p\n",
884
             entry->common.zone->name, (PTR) entry, entry->common.page,
885
             entry->common.page + SMALL_PAGE_SIZE - 1);
886
 
887
  return entry;
888
}
889
 
890
 
891
/* For a page that is no longer needed, put it on the free page list.  */
892
 
893
static inline void
894
free_small_page (struct small_page_entry *entry)
895
{
896
  if (GGC_DEBUG_LEVEL >= 2)
897
    fprintf (G.debug_file,
898
             "Deallocating %s page at %p, data %p-%p\n",
899
             entry->common.zone->name, (PTR) entry,
900
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
901
 
902
  gcc_assert (!entry->common.large_p);
903
 
904
  /* Mark the page as inaccessible.  Discard the handle to
905
     avoid handle leak.  */
906
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
907
                                            SMALL_PAGE_SIZE));
908
 
909
  entry->next = entry->common.zone->free_pages;
910
  entry->common.zone->free_pages = entry;
911
  entry->common.zone->n_small_pages--;
912
}
913
 
914
/* Release a large page that is no longer needed.  */
915
 
916
static inline void
917
free_large_page (struct large_page_entry *entry)
918
{
919
  if (GGC_DEBUG_LEVEL >= 2)
920
    fprintf (G.debug_file,
921
             "Deallocating %s page at %p, data %p-%p\n",
922
             entry->common.zone->name, (PTR) entry,
923
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
924
 
925
  gcc_assert (entry->common.large_p);
926
 
927
  set_page_table_entry (entry->common.page, NULL);
928
  free (entry);
929
}
930
 
931
/* Release the free page cache to the system.  */
932
 
933
static void
934
release_pages (struct alloc_zone *zone)
935
{
936
#ifdef USING_MMAP
937
  struct small_page_entry *p, *next;
938
  char *start;
939
  size_t len;
940
 
941
  /* Gather up adjacent pages so they are unmapped together.  */
942
  p = zone->free_pages;
943
 
944
  while (p)
945
    {
946
      start = p->common.page;
947
      next = p->next;
948
      len = SMALL_PAGE_SIZE;
949
      set_page_table_entry (p->common.page, NULL);
950
      p = next;
951
 
952
      while (p && p->common.page == start + len)
953
        {
954
          next = p->next;
955
          len += SMALL_PAGE_SIZE;
956
          set_page_table_entry (p->common.page, NULL);
957
          p = next;
958
        }
959
 
960
      munmap (start, len);
961
      zone->bytes_mapped -= len;
962
    }
963
 
964
  zone->free_pages = NULL;
965
#endif
966
}
967
 
968
/* Place the block at PTR of size SIZE on the free list for ZONE.  */
969
 
970
static inline void
971
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
972
{
973
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
974
  size_t bin = 0;
975
 
976
  bin = SIZE_BIN_DOWN (size);
977
  gcc_assert (bin != 0);
978
  if (bin > NUM_FREE_BINS)
979
    {
980
      bin = 0;
981
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
982
      chunk->size = size;
983
      chunk->next_free = zone->free_chunks[bin];
984
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
985
                                                size - sizeof (struct alloc_chunk)));
986
    }
987
  else
988
    {
989
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
990
      chunk->next_free = zone->free_chunks[bin];
991
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
992
                                                size - sizeof (struct alloc_chunk *)));
993
    }
994
 
995
  zone->free_chunks[bin] = chunk;
996
  if (bin > zone->high_free_bin)
997
    zone->high_free_bin = bin;
998
  if (GGC_DEBUG_LEVEL >= 3)
999
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
1000
}
1001
 
1002
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */
1003
 
1004
void *
1005
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1006
                     MEM_STAT_DECL)
1007
{
1008
  size_t bin;
1009
  size_t csize;
1010
  struct small_page_entry *entry;
1011
  struct alloc_chunk *chunk, **pp;
1012
  void *result;
1013
  size_t size = orig_size;
1014
 
1015
  /* Make sure that zero-sized allocations get a unique and freeable
1016
     pointer.  */
1017
  if (size == 0)
1018
    size = MAX_ALIGNMENT;
1019
  else
1020
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
1021
 
1022
  /* Try to allocate the object from several different sources.  Each
1023
     of these cases is responsible for setting RESULT and SIZE to
1024
     describe the allocated block, before jumping to FOUND.  If a
1025
     chunk is split, the allocate bit for the new chunk should also be
1026
     set.
1027
 
1028
     Large objects are handled specially.  However, they'll just fail
1029
     the next couple of conditions, so we can wait to check for them
1030
     below.  The large object case is relatively rare (< 1%), so this
1031
     is a win.  */
1032
 
1033
  /* First try to split the last chunk we allocated.  For best
1034
     fragmentation behavior it would be better to look for a
1035
     free bin of the appropriate size for a small object.  However,
1036
     we're unlikely (1% - 7%) to find one, and this gives better
1037
     locality behavior anyway.  This case handles the lion's share
1038
     of all calls to this function.  */
1039
  if (size <= zone->cached_free_size)
1040
    {
1041
      result = zone->cached_free;
1042
 
1043
      zone->cached_free_size -= size;
1044
      if (zone->cached_free_size)
1045
        {
1046
          zone->cached_free += size;
1047
          zone_set_object_alloc_bit (zone->cached_free);
1048
        }
1049
 
1050
      goto found;
1051
    }
1052
 
1053
  /* Next, try to find a free bin of the exactly correct size.  */
1054
 
1055
  /* We want to round SIZE up, rather than down, but we know it's
1056
     already aligned to at least FREE_BIN_DELTA, so we can just
1057
     shift.  */
1058
  bin = SIZE_BIN_DOWN (size);
1059
 
1060
  if (bin <= NUM_FREE_BINS
1061
      && (chunk = zone->free_chunks[bin]) != NULL)
1062
    {
1063
      /* We have a chunk of the right size.  Pull it off the free list
1064
         and use it.  */
1065
 
1066
      zone->free_chunks[bin] = chunk->next_free;
1067
 
1068
      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1069
         == FREE_BIN_DELTA.  */
1070
      result = chunk;
1071
 
1072
      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
1073
         may now be wrong, if this was the last chunk in the high bin.
1074
         Rather than fixing it up now, wait until we need to search
1075
         the free bins.  */
1076
 
1077
      goto found;
1078
    }
1079
 
1080
  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1081
     to split.  We can find one in the too-big bin, or in the largest
1082
     sized bin with a chunk in it.  Try the largest normal-sized bin
1083
     first.  */
1084
 
1085
  if (zone->high_free_bin > bin)
1086
    {
1087
      /* Find the highest numbered free bin.  It will be at or below
1088
         the watermark.  */
1089
      while (zone->high_free_bin > bin
1090
             && zone->free_chunks[zone->high_free_bin] == NULL)
1091
        zone->high_free_bin--;
1092
 
1093
      if (zone->high_free_bin > bin)
1094
        {
1095
          size_t tbin = zone->high_free_bin;
1096
          chunk = zone->free_chunks[tbin];
1097
 
1098
          /* Remove the chunk from its previous bin.  */
1099
          zone->free_chunks[tbin] = chunk->next_free;
1100
 
1101
          result = (char *) chunk;
1102
 
1103
          /* Save the rest of the chunk for future allocation.  */
1104
          if (zone->cached_free_size)
1105
            free_chunk (zone->cached_free, zone->cached_free_size, zone);
1106
 
1107
          chunk = (struct alloc_chunk *) ((char *) result + size);
1108
          zone->cached_free = (char *) chunk;
1109
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1110
 
1111
          /* Mark the new free chunk as an object, so that we can
1112
             find the size of the newly allocated object.  */
1113
          zone_set_object_alloc_bit (chunk);
1114
 
1115
          /* HIGH_FREE_BIN may now be wrong, if this was the last
1116
             chunk in the high bin.  Rather than fixing it up now,
1117
             wait until we need to search the free bins.  */
1118
 
1119
          goto found;
1120
        }
1121
    }
1122
 
1123
  /* Failing that, look through the "other" bucket for a chunk
1124
     that is large enough.  */
1125
  pp = &(zone->free_chunks[0]);
1126
  chunk = *pp;
1127
  while (chunk && chunk->size < size)
1128
    {
1129
      pp = &chunk->next_free;
1130
      chunk = *pp;
1131
    }
1132
 
1133
  if (chunk)
1134
    {
1135
      /* Remove the chunk from its previous bin.  */
1136
      *pp = chunk->next_free;
1137
 
1138
      result = (char *) chunk;
1139
 
1140
      /* Save the rest of the chunk for future allocation, if there's any
1141
         left over.  */
1142
      csize = chunk->size;
1143
      if (csize > size)
1144
        {
1145
          if (zone->cached_free_size)
1146
            free_chunk (zone->cached_free, zone->cached_free_size, zone);
1147
 
1148
          chunk = (struct alloc_chunk *) ((char *) result + size);
1149
          zone->cached_free = (char *) chunk;
1150
          zone->cached_free_size = csize - size;
1151
 
1152
          /* Mark the new free chunk as an object.  */
1153
          zone_set_object_alloc_bit (chunk);
1154
        }
1155
 
1156
      goto found;
1157
    }
1158
 
1159
  /* Handle large allocations.  We could choose any threshold between
1160
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1161
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
1162
     be guaranteed to have a unique entry in the lookup table.  Large
1163
     allocations will always fall through to here.  */
1164
  if (size > GGC_PAGE_SIZE)
1165
    {
1166
      struct large_page_entry *entry = alloc_large_page (size, zone);
1167
 
1168
#ifdef GATHER_STATISTICS
1169
      entry->common.survived = 0;
1170
#endif
1171
 
1172
      entry->next = zone->large_pages;
1173
      if (zone->large_pages)
1174
        zone->large_pages->prev = entry;
1175
      zone->large_pages = entry;
1176
 
1177
      result = entry->common.page;
1178
 
1179
      goto found;
1180
    }
1181
 
1182
  /* Failing everything above, allocate a new small page.  */
1183
 
1184
  entry = alloc_small_page (zone);
1185
  entry->next = zone->pages;
1186
  zone->pages = entry;
1187
 
1188
  /* Mark the first chunk in the new page.  */
1189
  entry->alloc_bits[0] = 1;
1190
 
1191
  result = entry->common.page;
1192
  if (size < SMALL_PAGE_SIZE)
1193
    {
1194
      if (zone->cached_free_size)
1195
        free_chunk (zone->cached_free, zone->cached_free_size, zone);
1196
 
1197
      zone->cached_free = (char *) result + size;
1198
      zone->cached_free_size = SMALL_PAGE_SIZE - size;
1199
 
1200
      /* Mark the new free chunk as an object.  */
1201
      zone_set_object_alloc_bit (zone->cached_free);
1202
    }
1203
 
1204
 found:
1205
 
1206
  /* We could save TYPE in the chunk, but we don't use that for
1207
     anything yet.  If we wanted to, we could do it by adding it
1208
     either before the beginning of the chunk or after its end,
1209
     and adjusting the size and pointer appropriately.  */
1210
 
1211
  /* We'll probably write to this after we return.  */
1212
  prefetchw (result);
1213
 
1214
#ifdef ENABLE_GC_CHECKING
1215
  /* `Poison' the entire allocated object.  */
1216
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1217
  memset (result, 0xaf, size);
1218
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
1219
                                            size - orig_size));
1220
#endif
1221
 
1222
  /* Tell Valgrind that the memory is there, but its content isn't
1223
     defined.  The bytes at the end of the object are still marked
1224
     inaccessible.  */
1225
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
1226
 
1227
  /* Keep track of how many bytes are being allocated.  This
1228
     information is used in deciding when to collect.  */
1229
  zone->allocated += size;
1230
 
1231
  timevar_ggc_mem_total += size;
1232
 
1233
#ifdef GATHER_STATISTICS
1234
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
1235
 
1236
  {
1237
    size_t object_size = size;
1238
    size_t overhead = object_size - orig_size;
1239
 
1240
    zone->stats.total_overhead += overhead;
1241
    zone->stats.total_allocated += object_size;
1242
 
1243
    if (orig_size <= 32)
1244
      {
1245
        zone->stats.total_overhead_under32 += overhead;
1246
        zone->stats.total_allocated_under32 += object_size;
1247
      }
1248
    if (orig_size <= 64)
1249
      {
1250
        zone->stats.total_overhead_under64 += overhead;
1251
        zone->stats.total_allocated_under64 += object_size;
1252
      }
1253
    if (orig_size <= 128)
1254
      {
1255
        zone->stats.total_overhead_under128 += overhead;
1256
        zone->stats.total_allocated_under128 += object_size;
1257
      }
1258
  }
1259
#endif
1260
 
1261
  if (GGC_DEBUG_LEVEL >= 3)
1262
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
1263
             (unsigned long) size, result);
1264
 
1265
  return result;
1266
}
1267
 
1268
/* Allocate SIZE bytes of memory with type information GTE, in the zone
   appropriate for that type.  */
1270
 
1271
void *
1272
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1273
                      MEM_STAT_DECL)
1274
{
1275
  switch (gte)
1276
    {
1277
    case gt_ggc_e_14lang_tree_node:
1278
      return ggc_alloc_zone_pass_stat (size, &tree_zone);
1279
 
1280
    case gt_ggc_e_7rtx_def:
1281
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1282
 
1283
    case gt_ggc_e_9rtvec_def:
1284
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1285
 
1286
    default:
1287
      return ggc_alloc_zone_pass_stat (size, &main_zone);
1288
    }
1289
}
1290
 
1291
/* Normal ggc_alloc simply allocates into the main zone.  */
1292
 
1293
void *
1294
ggc_alloc_stat (size_t size MEM_STAT_DECL)
1295
{
1296
  return ggc_alloc_zone_pass_stat (size, &main_zone);
1297
}
1298
 
1299
/* Poison the chunk.  */
1300
#ifdef ENABLE_GC_CHECKING
1301
#define poison_region(PTR, SIZE) \
1302
  memset ((PTR), 0xa5, (SIZE))
1303
#else
1304
#define poison_region(PTR, SIZE)
1305
#endif
1306
 
1307
/* Free the object at P.  */
1308
 
1309
void
1310
ggc_free (void *p)
1311
{
1312
  struct page_entry *page;
1313
 
1314
#ifdef GATHER_STATISTICS
1315
  ggc_free_overhead (p);
1316
#endif
1317
 
1318
  poison_region (p, ggc_get_size (p));
1319
 
1320
  page = zone_get_object_page (p);
1321
 
1322
  if (page->large_p)
1323
    {
1324
      struct large_page_entry *large_page
1325
        = (struct large_page_entry *) page;
1326
 
1327
      /* Remove the page from the linked list.  */
1328
      if (large_page->prev)
1329
        large_page->prev->next = large_page->next;
1330
      else
1331
        {
1332
          gcc_assert (large_page->common.zone->large_pages == large_page);
1333
          large_page->common.zone->large_pages = large_page->next;
1334
        }
1335
      if (large_page->next)
1336
        large_page->next->prev = large_page->prev;
1337
 
1338
      large_page->common.zone->allocated -= large_page->bytes;
1339
 
1340
      /* Release the memory associated with this object.  */
1341
      free_large_page (large_page);
1342
    }
1343
  else if (page->pch_p)
1344
    /* Don't do anything.  We won't allocate a new object from the
1345
       PCH zone so there's no point in releasing anything.  */
1346
    ;
1347
  else
1348
    {
1349
      size_t size = ggc_get_size (p);
1350
 
1351
      page->zone->allocated -= size;
1352
 
1353
      /* Add the chunk to the free list.  We don't bother with coalescing,
1354
         since we are likely to want a chunk of this size again.  */
1355
      free_chunk (p, size, page->zone);
1356
    }
1357
}
1358
 
1359
/* If P is not marked, mark it and return false.  Otherwise return true.
1360
   P must have been allocated by the GC allocator; it mustn't point to
1361
   static objects, stack variables, or memory allocated with malloc.  */
1362
 
1363
int
1364
ggc_set_mark (const void *p)
1365
{
1366
  struct page_entry *page;
1367
  const char *ptr = (const char *) p;
1368
 
1369
  page = zone_get_object_page (p);
1370
 
1371
  if (page->pch_p)
1372
    {
1373
      size_t mark_word, mark_bit, offset;
1374
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1375
      mark_word = offset / (8 * sizeof (mark_type));
1376
      mark_bit = offset % (8 * sizeof (mark_type));
1377
 
1378
      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1379
        return 1;
1380
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1381
    }
1382
  else if (page->large_p)
1383
    {
1384
      struct large_page_entry *large_page
1385
        = (struct large_page_entry *) page;
1386
 
1387
      if (large_page->mark_p)
1388
        return 1;
1389
      large_page->mark_p = true;
1390
    }
1391
  else
1392
    {
1393
      struct small_page_entry *small_page
1394
        = (struct small_page_entry *) page;
1395
 
1396
      if (small_page->mark_bits[zone_get_object_mark_word (p)]
1397
          & (1 << zone_get_object_mark_bit (p)))
1398
        return 1;
1399
      small_page->mark_bits[zone_get_object_mark_word (p)]
1400
        |= (1 << zone_get_object_mark_bit (p));
1401
    }
1402
 
1403
  if (GGC_DEBUG_LEVEL >= 4)
1404
    fprintf (G.debug_file, "Marking %p\n", p);
1405
 
1406
  return 0;
1407
}
1408
 
1409
/* Return 1 if P has been marked, zero otherwise.
1410
   P must have been allocated by the GC allocator; it mustn't point to
1411
   static objects, stack variables, or memory allocated with malloc.  */
1412
 
1413
int
1414
ggc_marked_p (const void *p)
1415
{
1416
  struct page_entry *page;
1417
  const char *ptr = p;
1418
 
1419
  page = zone_get_object_page (p);
1420
 
1421
  if (page->pch_p)
1422
    {
1423
      size_t mark_word, mark_bit, offset;
1424
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1425
      mark_word = offset / (8 * sizeof (mark_type));
1426
      mark_bit = offset % (8 * sizeof (mark_type));
1427
 
1428
      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1429
    }
1430
 
1431
  if (page->large_p)
1432
    {
1433
      struct large_page_entry *large_page
1434
        = (struct large_page_entry *) page;
1435
 
1436
      return large_page->mark_p;
1437
    }
1438
  else
1439
    {
1440
      struct small_page_entry *small_page
1441
        = (struct small_page_entry *) page;
1442
 
1443
      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1444
                   & (1 << zone_get_object_mark_bit (p)));
1445
    }
1446
}
1447
 
1448
/* Return the size of the gc-able object P.  */
1449
 
1450
size_t
1451
ggc_get_size (const void *p)
1452
{
1453
  struct page_entry *page;
1454
  const char *ptr = (const char *) p;
1455
 
1456
  page = zone_get_object_page (p);
1457
 
1458
  if (page->pch_p)
1459
    {
1460
      size_t alloc_word, alloc_bit, offset, max_size;
1461
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1462
      alloc_word = offset / (8 * sizeof (alloc_type));
1463
      alloc_bit = offset % (8 * sizeof (alloc_type));
1464
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
1465
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1466
                                 max_size);
1467
    }
1468
 
1469
  if (page->large_p)
1470
    return ((struct large_page_entry *)page)->bytes;
1471
  else
1472
    return zone_find_object_size ((struct small_page_entry *) page, p);
1473
}
1474
 
1475
/* Initialize the ggc-zone-mmap allocator.  */
1476
void
1477
init_ggc (void)
1478
{
1479
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1480
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1481
     the current assumptions to hold.  */
1482
 
1483
  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1484
 
1485
  /* Set up the main zone by hand.  */
1486
  main_zone.name = "Main zone";
1487
  G.zones = &main_zone;
1488
 
1489
  /* Allocate the default zones.  */
1490
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
1491
  new_ggc_zone_1 (&tree_zone, "Tree zone");
1492
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1493
 
1494
  G.pagesize = getpagesize();
1495
  G.lg_pagesize = exact_log2 (G.pagesize);
1496
  G.page_mask = ~(G.pagesize - 1);
1497
 
1498
  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
1499
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1500
 
1501
  /* Allocate 16 system pages at a time.  */
1502
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
1503
 
1504
  /* Calculate the size of the allocation bitmap and other overhead.  */
1505
  /* Right now we allocate bits for the page header and bitmap.  These
1506
     are wasted, but a little tricky to eliminate.  */
1507
  G.small_page_overhead
1508
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1509
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1510
 
1511
#ifdef HAVE_MMAP_DEV_ZERO
1512
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1513
  gcc_assert (G.dev_zero_fd != -1);
1514
#endif
1515
 
1516
#if 0
1517
  G.debug_file = fopen ("ggc-mmap.debug", "w");
1518
  setlinebuf (G.debug_file);
1519
#else
1520
  G.debug_file = stdout;
1521
#endif
1522
 
1523
#ifdef USING_MMAP
1524
  /* StunOS has an amazing off-by-one error for the first mmap allocation
1525
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1526
     believe, is an unaligned page allocation, which would cause us to
1527
     hork badly if we tried to use it.  */
1528
  {
1529
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1530
    struct small_page_entry *e;
1531
    if ((size_t)p & (G.pagesize - 1))
1532
      {
1533
        /* How losing.  Discard this one and try another.  If we still
1534
           can't get something useful, give up.  */
1535
 
1536
        p = alloc_anon (NULL, G.pagesize, &main_zone);
1537
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
1538
      }
1539
 
1540
    if (GGC_PAGE_SIZE == G.pagesize)
1541
      {
1542
        /* We have a good page, might as well hold onto it...  */
1543
        e = xcalloc (1, G.small_page_overhead);
1544
        e->common.page = p;
1545
        e->common.zone = &main_zone;
1546
        e->next = main_zone.free_pages;
1547
        set_page_table_entry (e->common.page, &e->common);
1548
        main_zone.free_pages = e;
1549
      }
1550
    else
1551
      {
1552
        munmap (p, G.pagesize);
1553
      }
1554
  }
1555
#endif
1556
}
1557
 
1558
/* Start a new GGC zone.  */
1559
 
1560
static void
1561
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1562
{
1563
  new_zone->name = name;
1564
  new_zone->next_zone = G.zones->next_zone;
1565
  G.zones->next_zone = new_zone;
1566
}
1567
 
1568
struct alloc_zone *
1569
new_ggc_zone (const char * name)
1570
{
1571
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1572
  new_ggc_zone_1 (new_zone, name);
1573
  return new_zone;
1574
}
1575
 
1576
/* Destroy a GGC zone.  */
1577
void
1578
destroy_ggc_zone (struct alloc_zone * dead_zone)
1579
{
1580
  struct alloc_zone *z;
1581
 
1582
  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1583
    /* Just find that zone.  */
1584
    continue;
1585
 
1586
  /* We should have found the zone in the list.  Anything else is fatal.  */
1587
  gcc_assert (z);
1588
 
1589
  /* z is dead, baby. z is dead.  */
1590
  z->dead = true;
1591
}
1592
 
1593
/* Free all empty pages and objects within a page for a given zone  */
1594
 
1595
static void
1596
sweep_pages (struct alloc_zone *zone)
1597
{
1598
  struct large_page_entry **lpp, *lp, *lnext;
1599
  struct small_page_entry **spp, *sp, *snext;
1600
  char *last_free;
1601
  size_t allocated = 0;
1602
  bool nomarksinpage;
1603
 
1604
  /* First, reset the free_chunks lists, since we are going to
1605
     re-free free chunks in hopes of coalescing them into large chunks.  */
1606
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1607
  zone->high_free_bin = 0;
1608
  zone->cached_free = NULL;
1609
  zone->cached_free_size = 0;
1610
 
1611
  /* Large pages are all or none affairs. Either they are completely
1612
     empty, or they are completely full.  */
1613
  lpp = &zone->large_pages;
1614
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
1615
    {
1616
      gcc_assert (lp->common.large_p);
1617
 
1618
      lnext = lp->next;
1619
 
1620
#ifdef GATHER_STATISTICS
1621
      /* This page has now survived another collection.  */
1622
      lp->common.survived++;
1623
#endif
1624
 
1625
      if (lp->mark_p)
1626
        {
1627
          lp->mark_p = false;
1628
          allocated += lp->bytes;
1629
          lpp = &lp->next;
1630
        }
1631
      else
1632
        {
1633
          *lpp = lnext;
1634
#ifdef ENABLE_GC_CHECKING
1635
          /* Poison the page.  */
1636
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1637
#endif
1638
          if (lp->prev)
1639
            lp->prev->next = lp->next;
1640
          if (lp->next)
1641
            lp->next->prev = lp->prev;
1642
          free_large_page (lp);
1643
        }
1644
    }
1645
 
1646
  spp = &zone->pages;
1647
  for (sp = zone->pages; sp != NULL; sp = snext)
1648
    {
1649
      char *object, *last_object;
1650
      char *end;
1651
      alloc_type *alloc_word_p;
1652
      mark_type *mark_word_p;
1653
 
1654
      gcc_assert (!sp->common.large_p);
1655
 
1656
      snext = sp->next;
1657
 
1658
#ifdef GATHER_STATISTICS
1659
      /* This page has now survived another collection.  */
1660
      sp->common.survived++;
1661
#endif
1662
 
1663
      /* Step through all chunks, consolidate those that are free and
1664
         insert them into the free lists.  Note that consolidation
1665
         slows down collection slightly.  */
1666
 
1667
      last_object = object = sp->common.page;
1668
      end = sp->common.page + SMALL_PAGE_SIZE;
1669
      last_free = NULL;
1670
      nomarksinpage = true;
1671
      mark_word_p = sp->mark_bits;
1672
      alloc_word_p = sp->alloc_bits;
1673
 
1674
      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1675
 
1676
      object = sp->common.page;
1677
      do
1678
        {
1679
          unsigned int i, n;
1680
          alloc_type alloc_word;
1681
          mark_type mark_word;
1682
 
1683
          alloc_word = *alloc_word_p++;
1684
          mark_word = *mark_word_p++;
1685
 
1686
          if (mark_word)
1687
            nomarksinpage = false;
1688
 
1689
          /* There ought to be some way to do this without looping...  */
1690
          i = 0;
1691
          while ((n = alloc_ffs (alloc_word)) != 0)
1692
            {
1693
              /* Extend the current state for n - 1 bits.  We can't
1694
                 shift alloc_word by n, even though it isn't used in the
1695
                 loop, in case only the highest bit was set.  */
1696
              alloc_word >>= n - 1;
1697
              mark_word >>= n - 1;
1698
              object += BYTES_PER_MARK_BIT * (n - 1);
1699
 
1700
              if (mark_word & 1)
1701
                {
1702
                  if (last_free)
1703
                    {
1704
                      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1705
                                                                object
1706
                                                                - last_free));
1707
                      poison_region (last_free, object - last_free);
1708
                      free_chunk (last_free, object - last_free, zone);
1709
                      last_free = NULL;
1710
                    }
1711
                  else
1712
                    allocated += object - last_object;
1713
                  last_object = object;
1714
                }
1715
              else
1716
                {
1717
                  if (last_free == NULL)
1718
                    {
1719
                      last_free = object;
1720
                      allocated += object - last_object;
1721
                    }
1722
                  else
1723
                    zone_clear_object_alloc_bit (sp, object);
1724
                }
1725
 
1726
              /* Shift to just after the alloc bit we handled.  */
1727
              alloc_word >>= 1;
1728
              mark_word >>= 1;
1729
              object += BYTES_PER_MARK_BIT;
1730
 
1731
              i += n;
1732
            }
1733
 
1734
          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1735
        }
1736
      while (object < end);
1737
 
1738
      if (nomarksinpage)
1739
        {
1740
          *spp = snext;
1741
#ifdef ENABLE_GC_CHECKING
1742
          VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
1743
          /* Poison the page.  */
1744
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1745
#endif
1746
          free_small_page (sp);
1747
          continue;
1748
        }
1749
      else if (last_free)
1750
        {
1751
          VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1752
                                                    object - last_free));
1753
          poison_region (last_free, object - last_free);
1754
          free_chunk (last_free, object - last_free, zone);
1755
        }
1756
      else
1757
        allocated += object - last_object;
1758
 
1759
      spp = &sp->next;
1760
    }
1761
 
1762
  zone->allocated = allocated;
1763
}
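 
/* The ffs-based walk in sweep_pages above is the densest part of the
   sweep.  The sketch below (kept out of the build with #if 0, and
   purely illustrative: visit_set_bits is not part of this file) shows
   the same idea on a single plain unsigned int word, using
   __builtin_ffs instead of alloc_ffs and omitting the parallel mark
   word and the OBJECT pointer arithmetic.  */
#if 0
static void
visit_set_bits (unsigned int word, void (*visit) (unsigned int bit))
{
  unsigned int i = 0, n;

  while ((n = __builtin_ffs (word)) != 0)
    {
      /* Skip the N - 1 clear bits below the lowest set bit, then
         visit the set bit itself.  */
      i += n - 1;
      visit (i);

      /* Shift past the handled bit in two steps, so a word whose only
         set bit is the topmost one never needs a shift by the full
         word width (which would be undefined).  */
      word >>= n - 1;
      word >>= 1;
      i++;
    }
}
#endif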
1764
 
1765
/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1766
   is true if we need to mark before sweeping, false if some other
1767
   zone collection has already performed marking for us.  Returns true
1768
   if we collected, false otherwise.  */
1769
 
1770
static bool
1771
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1772
{
1773
#if 0
1774
  /* */
1775
  {
1776
    int i;
1777
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
1778
      {
1779
        struct alloc_chunk *chunk;
1780
        int n, tot;
1781
 
1782
        n = 0;
1783
        tot = 0;
1784
        chunk = zone->free_chunks[i];
1785
        while (chunk)
1786
          {
1787
            n++;
1788
            tot += chunk->size;
1789
            chunk = chunk->next_free;
1790
          }
1791
        fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1792
                 i, n, tot);
1793
      }
1794
  }
1795
  /* */
1796
#endif
1797
 
1798
  if (!quiet_flag)
1799
    fprintf (stderr, " {%s GC %luk -> ",
1800
             zone->name, (unsigned long) zone->allocated / 1024);
1801
 
1802
  /* Zero the total allocated bytes.  This will be recalculated in the
1803
     sweep phase.  */
1804
  zone->allocated = 0;
1805
 
1806
  /* Release the pages we freed the last time we collected, but didn't
1807
     reuse in the interim.  */
1808
  release_pages (zone);
1809
 
1810
  if (need_marking)
1811
    {
1812
      zone_allocate_marks ();
1813
      ggc_mark_roots ();
1814
#ifdef GATHER_STATISTICS
1815
      ggc_prune_overhead_list ();
1816
#endif
1817
    }
1818
 
1819
  sweep_pages (zone);
1820
  zone->was_collected = true;
1821
  zone->allocated_last_gc = zone->allocated;
1822
 
1823
  if (!quiet_flag)
1824
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1825
  return true;
1826
}
1827
 
1828
#ifdef GATHER_STATISTICS
1829
/* Calculate the average page survival rate in terms of number of
1830
   collections.  */
1831
 
1832
static float
1833
calculate_average_page_survival (struct alloc_zone *zone)
1834
{
1835
  float count = 0.0;
1836
  float survival = 0.0;
1837
  struct small_page_entry *p;
1838
  struct large_page_entry *lp;
1839
  for (p = zone->pages; p; p = p->next)
1840
    {
1841
      count += 1.0;
1842
      survival += p->common.survived;
1843
    }
1844
  for (lp = zone->large_pages; lp; lp = lp->next)
1845
    {
1846
      count += 1.0;
1847
      survival += lp->common.survived;
1848
    }
1849
  return survival/count;
1850
}
1851
#endif
1852
 
1853
/* Top level collection routine.  */
1854
 
1855
void
1856
ggc_collect (void)
1857
{
1858
  struct alloc_zone *zone;
1859
  bool marked = false;
1860
 
1861
  timevar_push (TV_GC);
1862
 
1863
  if (!ggc_force_collect)
1864
    {
1865
      float allocated_last_gc = 0, allocated = 0, min_expand;
1866
 
1867
      for (zone = G.zones; zone; zone = zone->next_zone)
1868
        {
1869
          allocated_last_gc += zone->allocated_last_gc;
1870
          allocated += zone->allocated;
1871
        }
1872
 
1873
      allocated_last_gc =
1874
        MAX (allocated_last_gc,
1875
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1876
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
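      /* For concreteness (the parameter values here are only
         illustrative; both are configurable): if the zones allocated
         2048 kB before the last collection, GGC_MIN_HEAPSIZE is 4096
         (kB) and GGC_MIN_EXPAND is 30, then allocated_last_gc is
         raised to 4096 kB, min_expand is about 1229 kB, and we skip
         collecting until total allocation reaches roughly 5325 kB.  */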
1877
 
1878
      if (allocated < allocated_last_gc + min_expand)
1879
        {
1880
          timevar_pop (TV_GC);
1881
          return;
1882
        }
1883
    }
1884
 
1885
  /* Start by possibly collecting the main zone.  */
1886
  main_zone.was_collected = false;
1887
  marked |= ggc_collect_1 (&main_zone, true);
1888
 
1889
  /* In order to keep the number of collections down, we don't
1890
     collect other zones unless we are collecting the main zone.  This
1891
     gives us roughly the same number of collections as we used to
1892
     have with the old gc.  The number of collections is important
1893
     because our main slowdown (according to profiling) is now in
1894
     marking.  So if we mark twice as often as we used to, we'll be
1895
     twice as slow.  Hopefully we'll avoid this cost when we mark
1896
     zone-at-a-time.  */
1897
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
1898
     keep this code in case the heuristics are further refined.  */
1899
 
1900
  if (main_zone.was_collected)
1901
    {
1902
      struct alloc_zone *zone;
1903
 
1904
      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1905
        {
1906
          zone->was_collected = false;
1907
          marked |= ggc_collect_1 (zone, !marked);
1908
        }
1909
    }
1910
 
1911
#ifdef GATHER_STATISTICS
1912
  /* Print page survival stats, if someone wants them.  */
1913
  if (GGC_DEBUG_LEVEL >= 2)
1914
    {
1915
      for (zone = G.zones; zone; zone = zone->next_zone)
1916
        {
1917
          if (zone->was_collected)
1918
            {
1919
              float f = calculate_average_page_survival (zone);
1920
              printf ("Average page survival in zone `%s' is %f\n",
1921
                      zone->name, f);
1922
            }
1923
        }
1924
    }
1925
#endif
1926
 
1927
  if (marked)
1928
    zone_free_marks ();
1929
 
1930
  /* Free dead zones.  */
1931
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1932
    {
1933
      if (zone->next_zone->dead)
1934
        {
1935
          struct alloc_zone *dead_zone = zone->next_zone;
1936
 
1937
          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1938
 
1939
          /* The zone must be empty.  */
1940
          gcc_assert (!dead_zone->allocated);
1941
 
1942
          /* Unchain the dead zone, release all its pages and free it.  */
1943
          zone->next_zone = zone->next_zone->next_zone;
1944
          release_pages (dead_zone);
1945
          free (dead_zone);
1946
        }
1947
    }
1948
 
1949
  timevar_pop (TV_GC);
1950
}
1951
 
1952
/* Print allocation statistics.  */
1953
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1954
                  ? (x) \
1955
                  : ((x) < 1024*1024*10 \
1956
                     ? (x) / 1024 \
1957
                     : (x) / (1024*1024))))
1958
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
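/* Illustrative values: SCALE (5000) is 5000 with LABEL ' ' (bytes),
   SCALE (512000) is 500 with LABEL 'k', and SCALE (20971520) is 20
   with LABEL 'M'.  Quantities below 10k are printed in bytes, below
   10M in kilobytes, and everything larger in megabytes.  */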
1959
 
1960
void
1961
ggc_print_statistics (void)
1962
{
1963
  struct alloc_zone *zone;
1964
  struct ggc_statistics stats;
1965
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1966
  size_t pte_overhead, i;
1967
 
1968
  /* Clear the statistics.  */
1969
  memset (&stats, 0, sizeof (stats));
1970
 
1971
  /* Make sure collection will really occur.  */
1972
  ggc_force_collect = true;
1973
 
1974
  /* Collect and print the statistics common across collectors.  */
1975
  ggc_print_common_statistics (stderr, &stats);
1976
 
1977
  ggc_force_collect = false;
1978
 
1979
  /* Release free pages so that we will not count the bytes allocated
1980
     there as part of the total allocated memory.  */
1981
  for (zone = G.zones; zone; zone = zone->next_zone)
1982
    release_pages (zone);
1983
 
1984
  /* Collect some information about the various sizes of
1985
     allocation.  */
1986
  fprintf (stderr,
1987
           "Memory still allocated at the end of the compilation process\n");
1988
 
1989
  fprintf (stderr, "%20s %10s  %10s  %10s\n",
1990
           "Zone", "Allocated", "Used", "Overhead");
1991
  for (zone = G.zones; zone; zone = zone->next_zone)
1992
    {
1993
      struct large_page_entry *large_page;
1994
      size_t overhead, allocated, in_use;
1995
 
1996
      /* Skip empty zones.  */
1997
      if (!zone->pages && !zone->large_pages)
1998
        continue;
1999
 
2000
      allocated = in_use = 0;
2001
 
2002
      overhead = sizeof (struct alloc_zone);
2003
 
2004
      for (large_page = zone->large_pages; large_page != NULL;
2005
           large_page = large_page->next)
2006
        {
2007
          allocated += large_page->bytes;
2008
          in_use += large_page->bytes;
2009
          overhead += sizeof (struct large_page_entry);
2010
        }
2011
 
2012
      /* There's no easy way to walk through the small pages finding
2013
         used and unused objects.  Instead, add all the pages, and
2014
         subtract out the free list.  */
2015
 
2016
      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2017
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2018
      overhead += G.small_page_overhead * zone->n_small_pages;
2019
 
2020
      for (i = 0; i <= NUM_FREE_BINS; i++)
2021
        {
2022
          struct alloc_chunk *chunk = zone->free_chunks[i];
2023
          while (chunk)
2024
            {
2025
              in_use -= ggc_get_size (chunk);
2026
              chunk = chunk->next_free;
2027
            }
2028
        }
2029
 
2030
      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2031
               zone->name,
2032
               SCALE (allocated), LABEL (allocated),
2033
               SCALE (in_use), LABEL (in_use),
2034
               SCALE (overhead), LABEL (overhead));
2035
 
2036
      gcc_assert (in_use == zone->allocated);
2037
 
2038
      total_overhead += overhead;
2039
      total_allocated += zone->allocated;
2040
      total_bytes_mapped += zone->bytes_mapped;
2041
    }
2042
 
2043
  /* Count the size of the page table as best we can.  */
2044
#if HOST_BITS_PER_PTR <= 32
2045
  pte_overhead = sizeof (G.lookup);
2046
  for (i = 0; i < PAGE_L1_SIZE; i++)
2047
    if (G.lookup[i])
2048
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2049
#else
2050
  {
2051
    page_table table = G.lookup;
2052
    pte_overhead = 0;
2053
    while (table)
2054
      {
2055
        pte_overhead += sizeof (*table);
2056
        for (i = 0; i < PAGE_L1_SIZE; i++)
2057
          if (table->table[i])
2058
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2059
        table = table->next;
2060
      }
2061
  }
2062
#endif
2063
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2064
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2065
  total_overhead += pte_overhead;
2066
 
2067
  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2068
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2069
           SCALE (total_allocated), LABEL(total_allocated),
2070
           SCALE (total_overhead), LABEL (total_overhead));
2071
 
2072
#ifdef GATHER_STATISTICS  
2073
  {
2074
    unsigned long long all_overhead = 0, all_allocated = 0;
2075
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2076
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2077
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2078
 
2079
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2080
 
2081
    for (zone = G.zones; zone; zone = zone->next_zone)
2082
      {
2083
        all_overhead += zone->stats.total_overhead;
2084
        all_allocated += zone->stats.total_allocated;
2085
 
2086
        all_allocated_under32 += zone->stats.total_allocated_under32;
2087
        all_overhead_under32 += zone->stats.total_overhead_under32;
2088
 
2089
        all_allocated_under64 += zone->stats.total_allocated_under64;
2090
        all_overhead_under64 += zone->stats.total_overhead_under64;
2091
 
2092
        all_allocated_under128 += zone->stats.total_allocated_under128;
2093
        all_overhead_under128 += zone->stats.total_overhead_under128;
2094
 
2095
        fprintf (stderr, "%20s:                  %10lld\n",
2096
                 zone->name, zone->stats.total_allocated);
2097
      }
2098
 
2099
    fprintf (stderr, "\n");
2100
 
2101
    fprintf (stderr, "Total Overhead:                        %10lld\n",
2102
             all_overhead);
2103
    fprintf (stderr, "Total Allocated:                       %10lld\n",
2104
             all_allocated);
2105
 
2106
    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
2107
             all_overhead_under32);
2108
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
2109
             all_allocated_under32);
2110
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
2111
             all_overhead_under64);
2112
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
2113
             all_allocated_under64);
2114
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
2115
             all_overhead_under128);
2116
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
2117
             all_allocated_under128);
2118
  }
2119
#endif
2120
}
2121
 
2122
/* Precompiled header support.  */
2123
 
2124
/* For precompiled headers, we sort objects based on their type.  We
2125
   also sort various objects into their own buckets; currently this
2126
   covers strings and IDENTIFIER_NODE trees.  The choices of how
2127
   to sort buckets have not yet been tuned.  */
2128
 
2129
#define NUM_PCH_BUCKETS         (gt_types_enum_last + 3)
2130
 
2131
#define OTHER_BUCKET            (gt_types_enum_last + 0)
2132
#define IDENTIFIER_BUCKET       (gt_types_enum_last + 1)
2133
#define STRING_BUCKET           (gt_types_enum_last + 2)
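 
/* So, for instance, an IDENTIFIER_NODE tree is placed in
   IDENTIFIER_BUCKET, in-PCH string data in STRING_BUCKET, objects of
   unknown type in OTHER_BUCKET, and every other object in the bucket
   numbered by its gt_types_enum value (see pch_bucket below).  */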
2134
 
2135
struct ggc_pch_ondisk
2136
{
2137
  size_t total;
2138
  size_t type_totals[NUM_PCH_BUCKETS];
2139
};
2140
 
2141
struct ggc_pch_data
2142
{
2143
  struct ggc_pch_ondisk d;
2144
  size_t base;
2145
  size_t orig_base;
2146
  size_t alloc_size;
2147
  alloc_type *alloc_bits;
2148
  size_t type_bases[NUM_PCH_BUCKETS];
2149
  size_t start_offset;
2150
};
2151
 
2152
/* Initialize the PCH data structure.  */
2153
 
2154
struct ggc_pch_data *
2155
init_ggc_pch (void)
2156
{
2157
  return xcalloc (sizeof (struct ggc_pch_data), 1);
2158
}
2159
 
2160
/* Return which of the page-aligned buckets the object at X, with type
2161
   TYPE, should be sorted into in the PCH.  Strings will have
2162
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
2163
   of unknown type will also have TYPE equal to gt_types_enum_last.  */
2164
 
2165
static int
2166
pch_bucket (void *x, enum gt_types_enum type,
2167
            bool is_string)
2168
{
2169
  /* Sort identifiers into their own bucket, to improve locality
2170
     when searching the identifier hash table.  */
2171
  if (type == gt_ggc_e_14lang_tree_node
2172
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2173
    return IDENTIFIER_BUCKET;
2174
  else if (type == gt_types_enum_last)
2175
    {
2176
      if (is_string)
2177
        return STRING_BUCKET;
2178
      return OTHER_BUCKET;
2179
    }
2180
  return type;
2181
}
2182
 
2183
/* Add the size of object X to the size of the PCH data.  */
2184
 
2185
void
2186
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2187
                      size_t size, bool is_string, enum gt_types_enum type)
2188
{
2189
  /* NOTE: Right now we don't need to align up the size of any objects.
2190
     Strings can be unaligned, and everything else is allocated to a
2191
     MAX_ALIGNMENT boundary already.  */
2192
 
2193
  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2194
}
2195
 
2196
/* Return the total size of the PCH data.  */
2197
 
2198
size_t
2199
ggc_pch_total_size (struct ggc_pch_data *d)
2200
{
2201
  enum gt_types_enum i;
2202
  size_t alloc_size, total_size;
2203
 
2204
  total_size = 0;
2205
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
2206
    {
2207
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2208
      total_size += d->d.type_totals[i];
2209
    }
2210
  d->d.total = total_size;
2211
 
2212
  /* Include the size of the allocation bitmap.  */
2213
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2214
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2215
  d->alloc_size = alloc_size;
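  /* For example (illustrative sizes, assuming BYTES_PER_ALLOC_BIT is
     8): a 64 MB PCH needs one allocation bit per 8 bytes, i.e. a
     bitmap of 64 MB / 64 == 1 MB, which is then rounded up to
     MAX_ALIGNMENT.  */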
2216
 
2217
  return d->d.total + alloc_size;
2218
}
2219
 
2220
/* Set the base address for the objects in the PCH file.  */
2221
 
2222
void
2223
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2224
{
2225
  int i;
2226
  size_t base = (size_t) base_;
2227
 
2228
  d->base = d->orig_base = base;
2229
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
2230
    {
2231
      d->type_bases[i] = base;
2232
      base += d->d.type_totals[i];
2233
    }
2234
 
2235
  if (d->alloc_bits == NULL)
2236
    d->alloc_bits = xcalloc (1, d->alloc_size);
2237
}
2238
 
2239
/* Allocate a place for object X of size SIZE in the PCH file.  */
2240
 
2241
char *
2242
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2243
                      size_t size, bool is_string,
2244
                      enum gt_types_enum type)
2245
{
2246
  size_t alloc_word, alloc_bit;
2247
  char *result;
2248
  int bucket = pch_bucket (x, type, is_string);
2249
 
2250
  /* Record the start of the object in the allocation bitmap.  We
2251
     can't assert that the allocation bit is previously clear, because
2252
     strings may violate the invariant that they are at least
2253
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
2254
     should not be called for strings.  */
2255
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
2256
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2257
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2258
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2259
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;
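  /* For example (assuming, purely for illustration, that
     BYTES_PER_ALLOC_BIT is 8 and alloc_type is 8 bits wide): an object
     starting 200 bytes past ORIG_BASE corresponds to allocation bit
     200 / 8 == 25, which lives in word 25 / 8 == 3 at bit position
     25 % 8 == 1.  */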
2260
 
2261
  /* Place the object at the current pointer for this bucket.  */
2262
  result = (char *) d->type_bases[bucket];
2263
  d->type_bases[bucket] += size;
2264
  return result;
2265
}
2266
 
2267
/* Prepare to write out the PCH data to file F.  */
2268
 
2269
void
2270
ggc_pch_prepare_write (struct ggc_pch_data *d,
2271
                       FILE *f)
2272
{
2273
  /* We seek around a lot while writing.  Record where the end
2274
     of the padding in the PCH file is, so that we can
2275
     locate each object's offset.  */
2276
  d->start_offset = ftell (f);
2277
}
2278
 
2279
/* Write out object X of SIZE to file F.  */
2280
 
2281
void
2282
ggc_pch_write_object (struct ggc_pch_data *d,
2283
                      FILE *f, void *x, void *newx,
2284
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
2285
{
2286
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2287
    fatal_error ("can't seek PCH file: %m");
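 
  /* E.g. (illustrative): an object that ggc_pch_alloc_object placed at
     ORIG_BASE + 0x2000 is written at file offset START_OFFSET + 0x2000,
     a fixed displacement past the padding recorded in
     ggc_pch_prepare_write.  */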
2288
 
2289
  if (fwrite (x, size, 1, f) != 1)
2290
    fatal_error ("can't write PCH file: %m");
2291
}
2292
 
2293
void
2294
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2295
{
2296
  /* Write out the allocation bitmap.  */
2297
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2298
    fatal_error ("can't seek PCH file: %m");
2299
 
2300
  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2301
    fatal_error ("can't write PCH file: %m");
2302
 
2303
  /* Done with the PCH, so write out our footer.  */
2304
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2305
    fatal_error ("can't write PCH file: %m");
2306
 
2307
  free (d->alloc_bits);
2308
  free (d);
2309
}
2310
 
2311
/* The PCH file from F has been mapped at ADDR.  Read in any
2312
   additional data from the file and set up the GC state.  */
2313
 
2314
void
2315
ggc_pch_read (FILE *f, void *addr)
2316
{
2317
  struct ggc_pch_ondisk d;
2318
  size_t alloc_size;
2319
  struct alloc_zone *zone;
2320
  struct page_entry *pch_page;
2321
  char *p;
2322
 
2323
  if (fread (&d, sizeof (d), 1, f) != 1)
2324
    fatal_error ("can't read PCH file: %m");
2325
 
2326
  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2327
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2328
 
2329
  pch_zone.bytes = d.total;
2330
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2331
  pch_zone.page = (char *) addr;
2332
  pch_zone.end = (char *) pch_zone.alloc_bits;
2333
 
2334
  /* We've just read in a PCH file.  So, every object that used to be
2335
     allocated is now free.  */
2336
  for (zone = G.zones; zone; zone = zone->next_zone)
2337
    {
2338
      struct small_page_entry *page, *next_page;
2339
      struct large_page_entry *large_page, *next_large_page;
2340
 
2341
      zone->allocated = 0;
2342
 
2343
      /* Clear the zone's free chunk list.  */
2344
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2345
      zone->high_free_bin = 0;
2346
      zone->cached_free = NULL;
2347
      zone->cached_free_size = 0;
2348
 
2349
      /* Move all the small pages onto the free list.  */
2350
      for (page = zone->pages; page != NULL; page = next_page)
2351
        {
2352
          next_page = page->next;
2353
          memset (page->alloc_bits, 0,
2354
                  G.small_page_overhead - PAGE_OVERHEAD);
2355
          free_small_page (page);
2356
        }
2357
 
2358
      /* Discard all the large pages.  */
2359
      for (large_page = zone->large_pages; large_page != NULL;
2360
           large_page = next_large_page)
2361
        {
2362
          next_large_page = large_page->next;
2363
          free_large_page (large_page);
2364
        }
2365
 
2366
      zone->pages = NULL;
2367
      zone->large_pages = NULL;
2368
    }
2369
 
2370
  /* Allocate the dummy page entry for the PCH, and set all pages
2371
     mapped into the PCH to reference it.  */
2372
  pch_page = xcalloc (1, sizeof (struct page_entry));
2373
  pch_page->page = pch_zone.page;
2374
  pch_page->pch_p = true;
2375
 
2376
  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2377
    set_page_table_entry (p, pch_page);
2378
}
