/* "Bag-of-pages" garbage collector for the GNU compiler.
2
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
3
   Free Software Foundation, Inc.
4
 
5
This file is part of GCC.
6
 
7
GCC is free software; you can redistribute it and/or modify it under
8
the terms of the GNU General Public License as published by the Free
9
Software Foundation; either version 3, or (at your option) any later
10
version.
11
 
12
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13
WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
for more details.
16
 
17
You should have received a copy of the GNU General Public License
18
along with GCC; see the file COPYING3.  If not see
19
<http://www.gnu.org/licenses/>.  */
20
 
21
#include "config.h"
22
#include "system.h"
23
#include "coretypes.h"
24
#include "tm.h"
25
#include "tree.h"
26
#include "rtl.h"
27
#include "tm_p.h"
28
#include "toplev.h"
29
#include "flags.h"
30
#include "ggc.h"
31
#include "timevar.h"
32
#include "params.h"
33
#include "tree-flow.h"
34
#ifdef ENABLE_VALGRIND_CHECKING
35
# ifdef HAVE_VALGRIND_MEMCHECK_H
36
#  include <valgrind/memcheck.h>
37
# elif defined HAVE_MEMCHECK_H
38
#  include <memcheck.h>
39
# else
40
#  include <valgrind.h>
41
# endif
42
#else
43
/* Avoid #ifdefs when we can help it.  */
44
#define VALGRIND_DISCARD(x)
45
#endif
46
 
47
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
48
   file open.  Prefer either to valloc.  */
49
#ifdef HAVE_MMAP_ANON
50
# undef HAVE_MMAP_DEV_ZERO
51
 
52
# include <sys/mman.h>
53
# ifndef MAP_FAILED
54
#  define MAP_FAILED -1
55
# endif
56
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
57
#  define MAP_ANONYMOUS MAP_ANON
58
# endif
59
# define USING_MMAP
60
 
61
#endif
62
 
63
#ifdef HAVE_MMAP_DEV_ZERO
64
 
65
# include <sys/mman.h>
66
# ifndef MAP_FAILED
67
#  define MAP_FAILED -1
68
# endif
69
# define USING_MMAP
70
 
71
#endif
72
 
73
#ifndef USING_MMAP
74
#define USING_MALLOC_PAGE_GROUPS
75
#endif
76
 
77
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
105
 
106
/* Define GGC_DEBUG_LEVEL to print debugging information.
107
     0: No debugging output.
108
     1: GC statistics only.
109
     2: Page-entry allocations/deallocations as well.
110
     3: Object allocations as well.
111
     4: Object marks as well.  */
112
#define GGC_DEBUG_LEVEL (0)
113
 
114
#ifndef HOST_BITS_PER_PTR
115
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
116
#endif
117
 
118
 
119
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
140
 
141
#define PAGE_L1_BITS    (8)
142
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - G.lg_pagesize)
143
#define PAGE_L1_SIZE    ((size_t) 1 << PAGE_L1_BITS)
144
#define PAGE_L2_SIZE    ((size_t) 1 << PAGE_L2_BITS)
145
 
146
#define LOOKUP_L1(p) \
147
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
148
 
149
#define LOOKUP_L2(p) \
150
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
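
/* Illustrative sketch, not part of the original file: how LOOKUP_L1 and
   LOOKUP_L2 split a pointer on a hypothetical 32-bit host with
   4096-byte pages (G.lg_pagesize == 12, hence PAGE_L2_BITS == 12).
   The helper below is compiled out and exists only as an example.  */
#if 0
static void
lookup_split_example (void)
{
  size_t p = 0x12345678;
  size_t l1 = (p >> 24) & 0xff;   /* LOOKUP_L1: top PAGE_L1_BITS == 8 bits.  */
  size_t l2 = (p >> 12) & 0xfff;  /* LOOKUP_L2: next PAGE_L2_BITS == 12 bits.  */
  /* The low 12 bits (0x678) are the offset within the page and are ignored.  */
  gcc_assert (l1 == 0x12 && l2 == 0x345);
}
#endif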
151
 
152
/* The number of objects per allocation page, for objects on a page of
153
   the indicated ORDER.  */
154
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
155
 
156
/* The number of objects in P.  */
157
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
158
 
159
/* The size of an object on a page of the indicated ORDER.  */
160
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
161
 
162
/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
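
/* Illustrative sketch, not part of the original file: for a hypothetical
   object size Z == 24 == (3 << 3), compute_inverse below produces
   DIV_SHIFT == 3 and DIV_MULT == 0xAAAAAAAB, the inverse of 3 modulo
   2^32 (this example assumes 32-bit arithmetic).  Offsets that are
   multiples of 24 then divide exactly: 48 * 0xAAAAAAAB == 0x10 modulo
   2^32, and 0x10 >> 3 == 2 == 48 / 24.  */
#if 0
static unsigned int
offset_to_bit_example (unsigned int offset)
{
  const unsigned int mult = 0xAAAAAAABu;  /* Inverse of 3 mod 2^32.  */
  const unsigned int shift = 3;           /* Because 24 == 3 << 3.  */
  return (offset * mult) >> shift;        /* == offset / 24 when 24 | offset.  */
}
#endif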
170
 
171
/* The number of extra orders, not corresponding to power-of-two sized
172
   objects.  */
173
 
174
#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
175
 
176
#define RTL_SIZE(NSLOTS) \
177
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
178
 
179
#define TREE_EXP_SIZE(OPS) \
180
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
181
 
182
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct stmt_ann_d),
  sizeof (struct var_ann_d),
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_list),
  sizeof (struct tree_ssa_name),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (bitmap_element),
  /* PHI nodes with one to three arguments are already covered by the
     above sizes.  */
  sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),                 /* MEM, PLUS, etc.  */
  RTL_SIZE (9),                 /* INSN */
};
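
/* Illustrative note, not part of the original file: to give a
   hypothetical type `struct foo' its own bucket, appending
   `sizeof (struct foo),' to the table above is sufficient.
   NUM_EXTRA_ORDERS and NUM_ORDERS follow automatically via ARRAY_SIZE,
   and init_ggc rounds the new size up to MAX_ALIGNMENT and redirects
   the matching size_lookup entries to the new order.  */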
205
 
206
/* The total number of orders.  */
207
 
208
#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
209
 
210
/* We use this structure to determine the alignment required for
211
   allocations.  For power-of-two sized allocations, that's not a
212
   problem, but it does matter for odd-sized allocations.  */
213
 
214
struct max_alignment {
215
  char c;
216
  union {
217
    HOST_WIDEST_INT i;
218
    long double d;
219
  } u;
220
};
221
 
222
/* The biggest alignment required.  */
223
 
224
#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
225
 
226
/* Compute the smallest nonnegative number which when added to X gives
227
   a multiple of F.  */
228
 
229
#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
230
 
231
/* Compute the smallest multiple of F that is >= X.  */
232
 
233
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
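
/* Illustrative sketch, not part of the original file: a worked example
   of the two macros above, assuming a hypothetical MAX_ALIGNMENT of 8.
   A 10-byte extra-order object would be padded to 16 bytes by init_ggc.
   The helper below is compiled out and exists only as an example.  */
#if 0
static void
round_up_example (void)
{
  gcc_assert (ROUND_UP_VALUE (10, 8) == 6);  /* 10 + 6 == 16, a multiple of 8.  */
  gcc_assert (ROUND_UP (10, 8) == 16);       /* Smallest multiple of 8 >= 10.  */
}
#endif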
234
 
235
/* The Ith entry is the number of objects on a page of order I.  */
236
 
237
static unsigned objects_per_page_table[NUM_ORDERS];
238
 
239
/* The Ith entry is the size of an object on a page of order I.  */
240
 
241
static size_t object_size_table[NUM_ORDERS];
242
 
243
/* The Ith entry is a pair of numbers (mult, shift) such that
244
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
245
   for all k evenly divisible by OBJECT_SIZE(I).  */
246
 
247
static struct
248
{
249
  size_t mult;
250
  unsigned int shift;
251
}
252
inverse_table[NUM_ORDERS];
253
 
254
/* A page_entry records the status of an allocation page.  This
255
   structure is dynamically sized to fit the bitmap in_use_p.  */
256
typedef struct page_entry
257
{
258
  /* The next page-entry with objects of the same size, or NULL if
259
     this is the last page-entry.  */
260
  struct page_entry *next;
261
 
262
  /* The previous page-entry with objects of the same size, or NULL if
263
     this is the first page-entry.   The PREV pointer exists solely to
264
     keep the cost of ggc_free manageable.  */
265
  struct page_entry *prev;
266
 
267
  /* The number of bytes allocated.  (This will always be a multiple
268
     of the host system page size.)  */
269
  size_t bytes;
270
 
271
  /* The address at which the memory is allocated.  */
272
  char *page;
273
 
274
#ifdef USING_MALLOC_PAGE_GROUPS
275
  /* Back pointer to the page group this page came from.  */
276
  struct page_group *group;
277
#endif
278
 
279
  /* This is the index in the by_depth varray where this page table
280
     can be found.  */
281
  unsigned long index_by_depth;
282
 
283
  /* Context depth of this page.  */
284
  unsigned short context_depth;
285
 
286
  /* The number of free objects remaining on this page.  */
287
  unsigned short num_free_objects;
288
 
289
  /* A likely candidate for the bit position of a free object for the
290
     next allocation from this page.  */
291
  unsigned short next_bit_hint;
292
 
293
  /* The lg of size of objects allocated from this page.  */
294
  unsigned char order;
295
 
296
  /* A bit vector indicating whether or not objects are in use.  The
297
     Nth bit is one if the Nth object on this page is allocated.  This
298
     array is dynamically sized.  */
299
  unsigned long in_use_p[1];
300
} page_entry;
301
 
302
#ifdef USING_MALLOC_PAGE_GROUPS
303
/* A page_group describes a large allocation from malloc, from which
304
   we parcel out aligned pages.  */
305
typedef struct page_group
306
{
307
  /* A linked list of all extant page groups.  */
308
  struct page_group *next;
309
 
310
  /* The address we received from malloc.  */
311
  char *allocation;
312
 
313
  /* The size of the block.  */
314
  size_t alloc_size;
315
 
316
  /* A bitmask of pages in use.  */
317
  unsigned int in_use;
318
} page_group;
319
#endif
320
 
321
#if HOST_BITS_PER_PTR <= 32
322
 
323
/* On 32-bit hosts, we use a two level page table, as pictured above.  */
324
typedef page_entry **page_table[PAGE_L1_SIZE];
325
 
326
#else
327
 
328
/* On 64-bit hosts, we use the same two level page tables plus a linked
329
   list that disambiguates the top 32-bits.  There will almost always be
330
   exactly one entry in the list.  */
331
typedef struct page_table_chain
332
{
333
  struct page_table_chain *next;
334
  size_t high_bits;
335
  page_entry **table[PAGE_L1_SIZE];
336
} *page_table;
337
 
338
#endif
339
 
340
/* The rest of the global variables.  */
341
static struct globals
342
{
343
  /* The Nth element in this array is a page with objects of size 2^N.
344
     If there are any pages with free objects, they will be at the
345
     head of the list.  NULL if there are no page-entries for this
346
     object size.  */
347
  page_entry *pages[NUM_ORDERS];
348
 
349
  /* The Nth element in this array is the last page with objects of
350
     size 2^N.  NULL if there are no page-entries for this object
351
     size.  */
352
  page_entry *page_tails[NUM_ORDERS];
353
 
354
  /* Lookup table for associating allocation pages with object addresses.  */
355
  page_table lookup;
356
 
357
  /* The system's page size.  */
358
  size_t pagesize;
359
  size_t lg_pagesize;
360
 
361
  /* Bytes currently allocated.  */
362
  size_t allocated;
363
 
364
  /* Bytes currently allocated at the end of the last collection.  */
365
  size_t allocated_last_gc;
366
 
367
  /* Total amount of memory mapped.  */
368
  size_t bytes_mapped;
369
 
370
  /* Bit N set if any allocations have been done at context depth N.  */
371
  unsigned long context_depth_allocations;
372
 
373
  /* Bit N set if any collections have been done at context depth N.  */
374
  unsigned long context_depth_collections;
375
 
376
  /* The current depth in the context stack.  */
377
  unsigned short context_depth;
378
 
379
  /* A file descriptor open to /dev/zero for reading.  */
380
#if defined (HAVE_MMAP_DEV_ZERO)
381
  int dev_zero_fd;
382
#endif
383
 
384
  /* A cache of free system pages.  */
385
  page_entry *free_pages;
386
 
387
#ifdef USING_MALLOC_PAGE_GROUPS
388
  page_group *page_groups;
389
#endif
390
 
391
  /* The file descriptor for debugging output.  */
392
  FILE *debug_file;
393
 
394
  /* Current number of elements in use in depth below.  */
395
  unsigned int depth_in_use;
396
 
397
  /* Maximum number of elements that can be used before resizing.  */
398
  unsigned int depth_max;
399
 
400
  /* Each element of this array is an index into by_depth where the
     given depth starts.  This structure is indexed by the depth we
     are interested in.  */
403
  unsigned int *depth;
404
 
405
  /* Current number of elements in use in by_depth below.  */
406
  unsigned int by_depth_in_use;
407
 
408
  /* Maximum number of elements that can be used before resizing.  */
409
  unsigned int by_depth_max;
410
 
411
  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found here, ordered by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
416
  page_entry **by_depth;
417
 
418
  /* Each element is a pointer to the saved in_use_p bits, if any,
419
     zero otherwise.  We allocate them all together, to enable a
420
     better runtime data access pattern.  */
421
  unsigned long **save_in_use;
422
 
423
#ifdef ENABLE_GC_ALWAYS_COLLECT
424
  /* List of free objects to be verified as actually free on the
425
     next collection.  */
426
  struct free_object
427
  {
428
    void *object;
429
    struct free_object *next;
430
  } *free_object_list;
431
#endif
432
 
433
#ifdef GATHER_STATISTICS
434
  struct
435
  {
436
    /* Total memory allocated with ggc_alloc.  */
437
    unsigned long long total_allocated;
438
    /* Total overhead for memory to be allocated with ggc_alloc.  */
439
    unsigned long long total_overhead;
440
 
441
    /* Total allocations and overhead for sizes less than 32, 64 and 128.
442
       These sizes are interesting because they are typical cache line
443
       sizes.  */
444
 
445
    unsigned long long total_allocated_under32;
446
    unsigned long long total_overhead_under32;
447
 
448
    unsigned long long total_allocated_under64;
449
    unsigned long long total_overhead_under64;
450
 
451
    unsigned long long total_allocated_under128;
452
    unsigned long long total_overhead_under128;
453
 
454
    /* The allocations for each of the allocation orders.  */
455
    unsigned long long total_allocated_per_order[NUM_ORDERS];
456
 
457
    /* The overhead for each of the allocation orders.  */
458
    unsigned long long total_overhead_per_order[NUM_ORDERS];
459
  } stats;
460
#endif
461
} G;
462
 
463
/* The size in bytes required to maintain a bitmap for the objects
464
   on a page-entry.  */
465
#define BITMAP_SIZE(Num_objects) \
466
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
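
/* Illustrative note, not part of the original file: on a hypothetical
   64-bit host with 4096-byte pages, a page of order 3 holds 512
   eight-byte objects, so alloc_page requests BITMAP_SIZE (512 + 1)
   == CEIL (513, 64) * sizeof (long) == 9 * 8 == 72 bytes; the extra
   bit is the one-past-the-end sentinel.  */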
467
 
468
/* Allocate pages in chunks of this size, to throttle calls to memory
469
   allocation routines.  The first page is used, the rest go onto the
470
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
471
   in_use bitmask for page_group.  Hosts that need a different value
472
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
473
#ifndef GGC_QUIRE_SIZE
474
# ifdef USING_MMAP
475
#  define GGC_QUIRE_SIZE 256
476
# else
477
#  define GGC_QUIRE_SIZE 16
478
# endif
479
#endif
480
 
481
/* Initial guess as to how many page table entries we might need.  */
482
#define INITIAL_PTE_COUNT 128
483
 
484
static int ggc_allocated_p (const void *);
485
static page_entry *lookup_page_table_entry (const void *);
486
static void set_page_table_entry (void *, page_entry *);
487
#ifdef USING_MMAP
488
static char *alloc_anon (char *, size_t);
489
#endif
490
#ifdef USING_MALLOC_PAGE_GROUPS
491
static size_t page_group_index (char *, char *);
492
static void set_page_group_in_use (page_group *, char *);
493
static void clear_page_group_in_use (page_group *, char *);
494
#endif
495
static struct page_entry * alloc_page (unsigned);
496
static void free_page (struct page_entry *);
497
static void release_pages (void);
498
static void clear_marks (void);
499
static void sweep_pages (void);
500
static void ggc_recalculate_in_use_p (page_entry *);
501
static void compute_inverse (unsigned);
502
static inline void adjust_depth (void);
503
static void move_ptes_to_front (int, int);
504
 
505
void debug_print_page_list (int);
506
static void push_depth (unsigned int);
507
static void push_by_depth (page_entry *, unsigned long *);
508
 
509
/* Push an entry onto G.depth.  */
510
 
511
inline static void
512
push_depth (unsigned int i)
513
{
514
  if (G.depth_in_use >= G.depth_max)
515
    {
516
      G.depth_max *= 2;
517
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
518
    }
519
  G.depth[G.depth_in_use++] = i;
520
}
521
 
522
/* Push an entry onto G.by_depth and G.save_in_use.  */
523
 
524
inline static void
525
push_by_depth (page_entry *p, unsigned long *s)
526
{
527
  if (G.by_depth_in_use >= G.by_depth_max)
528
    {
529
      G.by_depth_max *= 2;
530
      G.by_depth = xrealloc (G.by_depth,
531
                             G.by_depth_max * sizeof (page_entry *));
532
      G.save_in_use = xrealloc (G.save_in_use,
533
                                G.by_depth_max * sizeof (unsigned long *));
534
    }
535
  G.by_depth[G.by_depth_in_use] = p;
536
  G.save_in_use[G.by_depth_in_use++] = s;
537
}
538
 
539
#if (GCC_VERSION < 3001)
540
#define prefetch(X) ((void) X)
541
#else
542
#define prefetch(X) __builtin_prefetch (X)
543
#endif
544
 
545
#define save_in_use_p_i(__i) \
546
  (G.save_in_use[__i])
547
#define save_in_use_p(__p) \
548
  (save_in_use_p_i (__p->index_by_depth))
549
 
550
/* Returns nonzero if P was allocated in GC'able memory.  */
551
 
552
static inline int
553
ggc_allocated_p (const void *p)
554
{
555
  page_entry ***base;
556
  size_t L1, L2;
557
 
558
#if HOST_BITS_PER_PTR <= 32
559
  base = &G.lookup[0];
560
#else
561
  page_table table = G.lookup;
562
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
563
  while (1)
564
    {
565
      if (table == NULL)
566
        return 0;
567
      if (table->high_bits == high_bits)
568
        break;
569
      table = table->next;
570
    }
571
  base = &table->table[0];
572
#endif
573
 
574
  /* Extract the level 1 and 2 indices.  */
575
  L1 = LOOKUP_L1 (p);
576
  L2 = LOOKUP_L2 (p);
577
 
578
  return base[L1] && base[L1][L2];
579
}
580
 
581
/* Traverse the page table and find the entry for a page.
582
   Die (probably) if the object wasn't allocated via GC.  */
583
 
584
static inline page_entry *
585
lookup_page_table_entry (const void *p)
586
{
587
  page_entry ***base;
588
  size_t L1, L2;
589
 
590
#if HOST_BITS_PER_PTR <= 32
591
  base = &G.lookup[0];
592
#else
593
  page_table table = G.lookup;
594
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
595
  while (table->high_bits != high_bits)
596
    table = table->next;
597
  base = &table->table[0];
598
#endif
599
 
600
  /* Extract the level 1 and 2 indices.  */
601
  L1 = LOOKUP_L1 (p);
602
  L2 = LOOKUP_L2 (p);
603
 
604
  return base[L1][L2];
605
}
606
 
607
/* Set the page table entry for a page.  */
608
 
609
static void
610
set_page_table_entry (void *p, page_entry *entry)
611
{
612
  page_entry ***base;
613
  size_t L1, L2;
614
 
615
#if HOST_BITS_PER_PTR <= 32
616
  base = &G.lookup[0];
617
#else
618
  page_table table;
619
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
620
  for (table = G.lookup; table; table = table->next)
621
    if (table->high_bits == high_bits)
622
      goto found;
623
 
624
  /* Not found -- allocate a new table.  */
625
  table = xcalloc (1, sizeof(*table));
626
  table->next = G.lookup;
627
  table->high_bits = high_bits;
628
  G.lookup = table;
629
found:
630
  base = &table->table[0];
631
#endif
632
 
633
  /* Extract the level 1 and 2 indices.  */
634
  L1 = LOOKUP_L1 (p);
635
  L2 = LOOKUP_L2 (p);
636
 
637
  if (base[L1] == NULL)
638
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
639
 
640
  base[L1][L2] = entry;
641
}
642
 
643
/* Prints the page-entry for object size ORDER, for debugging.  */
644
 
645
void
646
debug_print_page_list (int order)
647
{
648
  page_entry *p;
649
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
650
          (void *) G.page_tails[order]);
651
  p = G.pages[order];
652
  while (p != NULL)
653
    {
654
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
655
              p->num_free_objects);
656
      p = p->next;
657
    }
658
  printf ("NULL\n");
659
  fflush (stdout);
660
}
661
 
662
#ifdef USING_MMAP
663
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* macros is defined.  */
666
 
667
static inline char *
668
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
669
{
670
#ifdef HAVE_MMAP_ANON
671
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
672
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
673
#endif
674
#ifdef HAVE_MMAP_DEV_ZERO
675
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
676
                     MAP_PRIVATE, G.dev_zero_fd, 0);
677
#endif
678
 
679
  if (page == (char *) MAP_FAILED)
680
    {
681
      perror ("virtual memory exhausted");
682
      exit (FATAL_EXIT_CODE);
683
    }
684
 
685
  /* Remember that we allocated this memory.  */
686
  G.bytes_mapped += size;
687
 
688
  /* Pretend we don't have access to the allocated pages.  We'll enable
689
     access to smaller pieces of the area in ggc_alloc.  Discard the
690
     handle to avoid handle leak.  */
691
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
692
 
693
  return page;
694
}
695
#endif
696
#ifdef USING_MALLOC_PAGE_GROUPS
697
/* Compute the index for this page into the page group.  */
698
 
699
static inline size_t
700
page_group_index (char *allocation, char *page)
701
{
702
  return (size_t) (page - allocation) >> G.lg_pagesize;
703
}
704
 
705
/* Set and clear the in_use bit for this page in the page group.  */
706
 
707
static inline void
708
set_page_group_in_use (page_group *group, char *page)
709
{
710
  group->in_use |= 1 << page_group_index (group->allocation, page);
711
}
712
 
713
static inline void
714
clear_page_group_in_use (page_group *group, char *page)
715
{
716
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
717
}
718
#endif
719
 
720
/* Allocate a new page for allocating objects of size 2^ORDER,
721
   and return an entry for it.  The entry is not added to the
722
   appropriate page_table list.  */
723
 
724
static inline struct page_entry *
725
alloc_page (unsigned order)
726
{
727
  struct page_entry *entry, *p, **pp;
728
  char *page;
729
  size_t num_objects;
730
  size_t bitmap_size;
731
  size_t page_entry_size;
732
  size_t entry_size;
733
#ifdef USING_MALLOC_PAGE_GROUPS
734
  page_group *group;
735
#endif
736
 
737
  num_objects = OBJECTS_PER_PAGE (order);
738
  bitmap_size = BITMAP_SIZE (num_objects + 1);
739
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
740
  entry_size = num_objects * OBJECT_SIZE (order);
741
  if (entry_size < G.pagesize)
742
    entry_size = G.pagesize;
743
 
744
  entry = NULL;
745
  page = NULL;
746
 
747
  /* Check the list of free pages for one we can use.  */
748
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
749
    if (p->bytes == entry_size)
750
      break;
751
 
752
  if (p != NULL)
753
    {
754
      /* Recycle the allocated memory from this page ...  */
755
      *pp = p->next;
756
      page = p->page;
757
 
758
#ifdef USING_MALLOC_PAGE_GROUPS
759
      group = p->group;
760
#endif
761
 
762
      /* ... and, if possible, the page entry itself.  */
763
      if (p->order == order)
764
        {
765
          entry = p;
766
          memset (entry, 0, page_entry_size);
767
        }
768
      else
769
        free (p);
770
    }
771
#ifdef USING_MMAP
772
  else if (entry_size == G.pagesize)
773
    {
774
      /* We want just one page.  Allocate a bunch of them and put the
775
         extras on the freelist.  (Can only do this optimization with
776
         mmap for backing store.)  */
777
      struct page_entry *e, *f = G.free_pages;
778
      int i;
779
 
780
      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
781
 
782
      /* This loop counts down so that the chain will be in ascending
783
         memory order.  */
784
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
785
        {
786
          e = xcalloc (1, page_entry_size);
787
          e->order = order;
788
          e->bytes = G.pagesize;
789
          e->page = page + (i << G.lg_pagesize);
790
          e->next = f;
791
          f = e;
792
        }
793
 
794
      G.free_pages = f;
795
    }
796
  else
797
    page = alloc_anon (NULL, entry_size);
798
#endif
799
#ifdef USING_MALLOC_PAGE_GROUPS
800
  else
801
    {
802
      /* Allocate a large block of memory and serve out the aligned
803
         pages therein.  This results in much less memory wastage
804
         than the traditional implementation of valloc.  */
805
 
806
      char *allocation, *a, *enda;
807
      size_t alloc_size, head_slop, tail_slop;
808
      int multiple_pages = (entry_size == G.pagesize);
809
 
810
      if (multiple_pages)
811
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
812
      else
813
        alloc_size = entry_size + G.pagesize - 1;
814
      allocation = xmalloc (alloc_size);
815
 
816
      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
817
      head_slop = page - allocation;
818
      if (multiple_pages)
819
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
820
      else
821
        tail_slop = alloc_size - entry_size - head_slop;
822
      enda = allocation + alloc_size - tail_slop;
823
 
824
      /* We allocated N pages, which are likely not aligned, leaving
825
         us with N-1 usable pages.  We plan to place the page_group
826
         structure somewhere in the slop.  */
827
      if (head_slop >= sizeof (page_group))
828
        group = (page_group *)page - 1;
829
      else
830
        {
831
          /* We magically got an aligned allocation.  Too bad, we have
832
             to waste a page anyway.  */
833
          if (tail_slop == 0)
834
            {
835
              enda -= G.pagesize;
836
              tail_slop += G.pagesize;
837
            }
838
          gcc_assert (tail_slop >= sizeof (page_group));
839
          group = (page_group *)enda;
840
          tail_slop -= sizeof (page_group);
841
        }
842
 
843
      /* Remember that we allocated this memory.  */
844
      group->next = G.page_groups;
845
      group->allocation = allocation;
846
      group->alloc_size = alloc_size;
847
      group->in_use = 0;
848
      G.page_groups = group;
849
      G.bytes_mapped += alloc_size;
850
 
851
      /* If we allocated multiple pages, put the rest on the free list.  */
852
      if (multiple_pages)
853
        {
854
          struct page_entry *e, *f = G.free_pages;
855
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
856
            {
857
              e = xcalloc (1, page_entry_size);
858
              e->order = order;
859
              e->bytes = G.pagesize;
860
              e->page = a;
861
              e->group = group;
862
              e->next = f;
863
              f = e;
864
            }
865
          G.free_pages = f;
866
        }
867
    }
868
#endif
869
 
870
  if (entry == NULL)
871
    entry = xcalloc (1, page_entry_size);
872
 
873
  entry->bytes = entry_size;
874
  entry->page = page;
875
  entry->context_depth = G.context_depth;
876
  entry->order = order;
877
  entry->num_free_objects = num_objects;
878
  entry->next_bit_hint = 1;
879
 
880
  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
881
 
882
#ifdef USING_MALLOC_PAGE_GROUPS
883
  entry->group = group;
884
  set_page_group_in_use (group, page);
885
#endif
886
 
887
  /* Set the one-past-the-end in-use bit.  This acts as a sentinel as we
     increment the hint.  */
889
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
890
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
891
 
892
  set_page_table_entry (page, entry);
893
 
894
  if (GGC_DEBUG_LEVEL >= 2)
895
    fprintf (G.debug_file,
896
             "Allocating page at %p, object size=%lu, data %p-%p\n",
897
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
898
             page + entry_size - 1);
899
 
900
  return entry;
901
}
902
 
903
/* Adjust the size of G.depth so that no index greater than the one
904
   used by the top of the G.by_depth is used.  */
905
 
906
static inline void
907
adjust_depth (void)
908
{
909
  page_entry *top;
910
 
911
  if (G.by_depth_in_use)
912
    {
913
      top = G.by_depth[G.by_depth_in_use-1];
914
 
915
      /* Peel back indices in depth that index into by_depth, so that
916
         as new elements are added to by_depth, we note the indices
917
         of those elements, if they are for new context depths.  */
918
      while (G.depth_in_use > (size_t)top->context_depth+1)
919
        --G.depth_in_use;
920
    }
921
}
922
 
923
/* For a page that is no longer needed, put it on the free page list.  */
924
 
925
static void
926
free_page (page_entry *entry)
927
{
928
  if (GGC_DEBUG_LEVEL >= 2)
929
    fprintf (G.debug_file,
930
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
931
             entry->page, entry->page + entry->bytes - 1);
932
 
933
  /* Mark the page as inaccessible.  Discard the handle to avoid handle
934
     leak.  */
935
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
936
 
937
  set_page_table_entry (entry->page, NULL);
938
 
939
#ifdef USING_MALLOC_PAGE_GROUPS
940
  clear_page_group_in_use (entry->group, entry->page);
941
#endif
942
 
943
  if (G.by_depth_in_use > 1)
944
    {
945
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
946
      int i = entry->index_by_depth;
947
 
948
      /* We cannot free a page from a context deeper than the current
949
         one.  */
950
      gcc_assert (entry->context_depth == top->context_depth);
951
 
952
      /* Put top element into freed slot.  */
953
      G.by_depth[i] = top;
954
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
955
      top->index_by_depth = i;
956
    }
957
  --G.by_depth_in_use;
958
 
959
  adjust_depth ();
960
 
961
  entry->next = G.free_pages;
962
  G.free_pages = entry;
963
}
964
 
965
/* Release the free page cache to the system.  */
966
 
967
static void
968
release_pages (void)
969
{
970
#ifdef USING_MMAP
971
  page_entry *p, *next;
972
  char *start;
973
  size_t len;
974
 
975
  /* Gather up adjacent pages so they are unmapped together.  */
976
  p = G.free_pages;
977
 
978
  while (p)
979
    {
980
      start = p->page;
981
      next = p->next;
982
      len = p->bytes;
983
      free (p);
984
      p = next;
985
 
986
      while (p && p->page == start + len)
987
        {
988
          next = p->next;
989
          len += p->bytes;
990
          free (p);
991
          p = next;
992
        }
993
 
994
      munmap (start, len);
995
      G.bytes_mapped -= len;
996
    }
997
 
998
  G.free_pages = NULL;
999
#endif
1000
#ifdef USING_MALLOC_PAGE_GROUPS
1001
  page_entry **pp, *p;
1002
  page_group **gp, *g;
1003
 
1004
  /* Remove all pages from free page groups from the list.  */
1005
  pp = &G.free_pages;
1006
  while ((p = *pp) != NULL)
1007
    if (p->group->in_use == 0)
1008
      {
1009
        *pp = p->next;
1010
        free (p);
1011
      }
1012
    else
1013
      pp = &p->next;
1014
 
1015
  /* Remove all free page groups, and release the storage.  */
1016
  gp = &G.page_groups;
1017
  while ((g = *gp) != NULL)
1018
    if (g->in_use == 0)
1019
      {
1020
        *gp = g->next;
1021
        G.bytes_mapped -= g->alloc_size;
1022
        free (g->allocation);
1023
      }
1024
    else
1025
      gp = &g->next;
1026
#endif
1027
}
1028
 
1029
/* This table provides a fast way to determine ceil(log_2(size)) for
1030
   allocation requests.  The minimum allocation size is eight bytes.  */
1031
#define NUM_SIZE_LOOKUP 512
1032
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1033
{
1034
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1035
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1036
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1037
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1038
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1039
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1040
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1041
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1042
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1043
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1044
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1045
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1046
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1047
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1048
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1049
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1050
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1055
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1056
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1057
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1058
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1059
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1060
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1061
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1062
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1063
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1064
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1065
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1066
};
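
/* Illustrative note, not part of the original file: with the unmodified
   table above, a 10-byte request maps to size_lookup[10] == 4, i.e. a
   16-byte object (init_ggc may later redirect such entries to a closer
   special order).  Requests of NUM_SIZE_LOOKUP bytes or more bypass the
   table; e.g. a 600-byte request scans upward from order 10 in
   ggc_alloc_stat and is served from 1024-byte objects.  */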
1067
 
1068
/* Typed allocation function.  Does nothing special in this collector.  */
1069
 
1070
void *
1071
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1072
                      MEM_STAT_DECL)
1073
{
1074
  return ggc_alloc_stat (size PASS_MEM_STAT);
1075
}
1076
 
1077
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1078
 
1079
void *
1080
ggc_alloc_stat (size_t size MEM_STAT_DECL)
1081
{
1082
  size_t order, word, bit, object_offset, object_size;
1083
  struct page_entry *entry;
1084
  void *result;
1085
 
1086
  if (size < NUM_SIZE_LOOKUP)
1087
    {
1088
      order = size_lookup[size];
1089
      object_size = OBJECT_SIZE (order);
1090
    }
1091
  else
1092
    {
1093
      order = 10;
1094
      while (size > (object_size = OBJECT_SIZE (order)))
1095
        order++;
1096
    }
1097
 
1098
  /* If there are non-full pages for this size allocation, they are at
1099
     the head of the list.  */
1100
  entry = G.pages[order];
1101
 
1102
  /* If there is no page for this object size, or all pages in this
1103
     context are full, allocate a new page.  */
1104
  if (entry == NULL || entry->num_free_objects == 0)
1105
    {
1106
      struct page_entry *new_entry;
1107
      new_entry = alloc_page (order);
1108
 
1109
      new_entry->index_by_depth = G.by_depth_in_use;
1110
      push_by_depth (new_entry, 0);
1111
 
1112
      /* We can skip context depths, if we do, make sure we go all the
1113
         way to the new depth.  */
1114
      while (new_entry->context_depth >= G.depth_in_use)
1115
        push_depth (G.by_depth_in_use-1);
1116
 
1117
      /* If this is the only entry, it's also the tail.  If it is not
1118
         the only entry, then we must update the PREV pointer of the
1119
         ENTRY (G.pages[order]) to point to our new page entry.  */
1120
      if (entry == NULL)
1121
        G.page_tails[order] = new_entry;
1122
      else
1123
        entry->prev = new_entry;
1124
 
1125
      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL PREV pointer.  */
1127
      new_entry->next = entry;
1128
      new_entry->prev = NULL;
1129
      entry = new_entry;
1130
      G.pages[order] = new_entry;
1131
 
1132
      /* For a new page, we know the word and bit positions (in the
1133
         in_use bitmap) of the first available object -- they're zero.  */
1134
      new_entry->next_bit_hint = 1;
1135
      word = 0;
1136
      bit = 0;
1137
      object_offset = 0;
1138
    }
1139
  else
1140
    {
1141
      /* First try to use the hint left from the previous allocation
1142
         to locate a clear bit in the in-use bitmap.  We've made sure
1143
         that the one-past-the-end bit is always set, so if the hint
1144
         has run over, this test will fail.  */
1145
      unsigned hint = entry->next_bit_hint;
1146
      word = hint / HOST_BITS_PER_LONG;
1147
      bit = hint % HOST_BITS_PER_LONG;
1148
 
1149
      /* If the hint didn't work, scan the bitmap from the beginning.  */
1150
      if ((entry->in_use_p[word] >> bit) & 1)
1151
        {
1152
          word = bit = 0;
1153
          while (~entry->in_use_p[word] == 0)
1154
            ++word;
1155
 
1156
#if GCC_VERSION >= 3004
1157
          bit = __builtin_ctzl (~entry->in_use_p[word]);
1158
#else
1159
          while ((entry->in_use_p[word] >> bit) & 1)
1160
            ++bit;
1161
#endif
1162
 
1163
          hint = word * HOST_BITS_PER_LONG + bit;
1164
        }
1165
 
1166
      /* Next time, try the next bit.  */
1167
      entry->next_bit_hint = hint + 1;
1168
 
1169
      object_offset = hint * object_size;
1170
    }
1171
 
1172
  /* Set the in-use bit.  */
1173
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1174
 
1175
  /* Keep a running total of the number of free objects.  If this page
1176
     fills up, we may have to move it to the end of the list if the
1177
     next page isn't full.  If the next page is full, all subsequent
1178
     pages are full, so there's no need to move it.  */
1179
  if (--entry->num_free_objects == 0
1180
      && entry->next != NULL
1181
      && entry->next->num_free_objects > 0)
1182
    {
1183
      /* We have a new head for the list.  */
1184
      G.pages[order] = entry->next;
1185
 
1186
      /* We are moving ENTRY to the end of the page table list.
1187
         The new page at the head of the list will have NULL in
1188
         its PREV field and ENTRY will have NULL in its NEXT field.  */
1189
      entry->next->prev = NULL;
1190
      entry->next = NULL;
1191
 
1192
      /* Append ENTRY to the tail of the list.  */
1193
      entry->prev = G.page_tails[order];
1194
      G.page_tails[order]->next = entry;
1195
      G.page_tails[order] = entry;
1196
    }
1197
 
1198
  /* Calculate the object's address.  */
1199
  result = entry->page + object_offset;
1200
#ifdef GATHER_STATISTICS
1201
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1202
                       result PASS_MEM_STAT);
1203
#endif
1204
 
1205
#ifdef ENABLE_GC_CHECKING
1206
  /* Keep poisoning the object by writing 0xaf, in an attempt to keep the
     exact same semantics in the presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
1210
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1211
 
1212
  /* `Poison' the entire allocated object, including any padding at
1213
     the end.  */
1214
  memset (result, 0xaf, object_size);
1215
 
1216
  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid handle leak.  */
1218
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1219
                                            object_size - size));
1220
#endif
1221
 
1222
  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
1225
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1226
 
1227
  /* Keep track of how many bytes are being allocated.  This
1228
     information is used in deciding when to collect.  */
1229
  G.allocated += object_size;
1230
 
1231
  /* For timevar statistics.  */
1232
  timevar_ggc_mem_total += object_size;
1233
 
1234
#ifdef GATHER_STATISTICS
1235
  {
1236
    size_t overhead = object_size - size;
1237
 
1238
    G.stats.total_overhead += overhead;
1239
    G.stats.total_allocated += object_size;
1240
    G.stats.total_overhead_per_order[order] += overhead;
1241
    G.stats.total_allocated_per_order[order] += object_size;
1242
 
1243
    if (size <= 32)
1244
      {
1245
        G.stats.total_overhead_under32 += overhead;
1246
        G.stats.total_allocated_under32 += object_size;
1247
      }
1248
    if (size <= 64)
1249
      {
1250
        G.stats.total_overhead_under64 += overhead;
1251
        G.stats.total_allocated_under64 += object_size;
1252
      }
1253
    if (size <= 128)
1254
      {
1255
        G.stats.total_overhead_under128 += overhead;
1256
        G.stats.total_allocated_under128 += object_size;
1257
      }
1258
  }
1259
#endif
1260
 
1261
  if (GGC_DEBUG_LEVEL >= 3)
1262
    fprintf (G.debug_file,
1263
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1264
             (unsigned long) size, (unsigned long) object_size, result,
1265
             (void *) entry);
1266
 
1267
  return result;
1268
}
1269
 
1270
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */
1273
 
1274
int
1275
ggc_set_mark (const void *p)
1276
{
1277
  page_entry *entry;
1278
  unsigned bit, word;
1279
  unsigned long mask;
1280
 
1281
  /* Look up the page on which the object is alloced.  If the object
1282
     wasn't allocated by the collector, we'll probably die.  */
1283
  entry = lookup_page_table_entry (p);
1284
  gcc_assert (entry);
1285
 
1286
  /* Calculate the index of the object on the page; this is its bit
1287
     position in the in_use_p bitmap.  */
1288
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1289
  word = bit / HOST_BITS_PER_LONG;
1290
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1291
 
1292
  /* If the bit was previously set, skip it.  */
1293
  if (entry->in_use_p[word] & mask)
1294
    return 1;
1295
 
1296
  /* Otherwise set it, and decrement the free object count.  */
1297
  entry->in_use_p[word] |= mask;
1298
  entry->num_free_objects -= 1;
1299
 
1300
  if (GGC_DEBUG_LEVEL >= 4)
1301
    fprintf (G.debug_file, "Marking %p\n", p);
1302
 
1303
  return 0;
1304
}
1305
 
1306
/* Return 1 if P has been marked, zero otherwise.
1307
   P must have been allocated by the GC allocator; it mustn't point to
1308
   static objects, stack variables, or memory allocated with malloc.  */
1309
 
1310
int
1311
ggc_marked_p (const void *p)
1312
{
1313
  page_entry *entry;
1314
  unsigned bit, word;
1315
  unsigned long mask;
1316
 
1317
  /* Look up the page on which the object is alloced.  If the object
1318
     wasn't allocated by the collector, we'll probably die.  */
1319
  entry = lookup_page_table_entry (p);
1320
  gcc_assert (entry);
1321
 
1322
  /* Calculate the index of the object on the page; this is its bit
1323
     position in the in_use_p bitmap.  */
1324
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1325
  word = bit / HOST_BITS_PER_LONG;
1326
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1327
 
1328
  return (entry->in_use_p[word] & mask) != 0;
1329
}
1330
 
1331
/* Return the size of the gc-able object P.  */
1332
 
1333
size_t
1334
ggc_get_size (const void *p)
1335
{
1336
  page_entry *pe = lookup_page_table_entry (p);
1337
  return OBJECT_SIZE (pe->order);
1338
}
1339
 
1340
/* Release the memory for object P.  */
1341
 
1342
void
1343
ggc_free (void *p)
1344
{
1345
  page_entry *pe = lookup_page_table_entry (p);
1346
  size_t order = pe->order;
1347
  size_t size = OBJECT_SIZE (order);
1348
 
1349
#ifdef GATHER_STATISTICS
1350
  ggc_free_overhead (p);
1351
#endif
1352
 
1353
  if (GGC_DEBUG_LEVEL >= 3)
1354
    fprintf (G.debug_file,
1355
             "Freeing object, actual size=%lu, at %p on %p\n",
1356
             (unsigned long) size, p, (void *) pe);
1357
 
1358
#ifdef ENABLE_GC_CHECKING
1359
  /* Poison the data, to indicate the data is garbage.  */
1360
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1361
  memset (p, 0xa5, size);
1362
#endif
1363
  /* Let valgrind know the object is free.  */
1364
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1365
 
1366
#ifdef ENABLE_GC_ALWAYS_COLLECT
1367
  /* In the completely-anal-checking mode, we do *not* immediately free
1368
     the data, but instead verify that the data is *actually* not
1369
     reachable the next time we collect.  */
1370
  {
1371
    struct free_object *fo = XNEW (struct free_object);
1372
    fo->object = p;
1373
    fo->next = G.free_object_list;
1374
    G.free_object_list = fo;
1375
  }
1376
#else
1377
  {
1378
    unsigned int bit_offset, word, bit;
1379
 
1380
    G.allocated -= size;
1381
 
1382
    /* Mark the object not-in-use.  */
1383
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1384
    word = bit_offset / HOST_BITS_PER_LONG;
1385
    bit = bit_offset % HOST_BITS_PER_LONG;
1386
    pe->in_use_p[word] &= ~(1UL << bit);
1387
 
1388
    if (pe->num_free_objects++ == 0)
1389
      {
1390
        page_entry *p, *q;
1391
 
1392
        /* If the page is completely full, then it's supposed to
1393
           be after all pages that aren't.  Since we've freed one
1394
           object from a page that was full, we need to move the
1395
           page to the head of the list.
1396
 
1397
           PE is the node we want to move.  Q is the previous node
1398
           and P is the next node in the list.  */
1399
        q = pe->prev;
1400
        if (q && q->num_free_objects == 0)
1401
          {
1402
            p = pe->next;
1403
 
1404
            q->next = p;
1405
 
1406
            /* If PE was at the end of the list, then Q becomes the
1407
               new end of the list.  If PE was not the end of the
1408
               list, then we need to update the PREV field for P.  */
1409
            if (!p)
1410
              G.page_tails[order] = q;
1411
            else
1412
              p->prev = q;
1413
 
1414
            /* Move PE to the head of the list.  */
1415
            pe->next = G.pages[order];
1416
            pe->prev = NULL;
1417
            G.pages[order]->prev = pe;
1418
            G.pages[order] = pe;
1419
          }
1420
 
1421
        /* Reset the hint bit to point to the only free object.  */
1422
        pe->next_bit_hint = bit_offset;
1423
      }
1424
  }
1425
#endif
1426
}
1427
 
1428
/* Subroutine of init_ggc which computes the pair of numbers used to
1429
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1430
 
1431
   This algorithm is taken from Granlund and Montgomery's paper
1432
   "Division by Invariant Integers using Multiplication"
1433
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1434
   constants).  */
1435
 
1436
static void
1437
compute_inverse (unsigned order)
1438
{
1439
  size_t size, inv;
1440
  unsigned int e;
1441
 
1442
  size = OBJECT_SIZE (order);
1443
  e = 0;
1444
  while (size % 2 == 0)
1445
    {
1446
      e++;
1447
      size >>= 1;
1448
    }
1449
 
1450
  inv = size;
1451
  while (inv * size != 1)
1452
    inv = inv * (2 - inv*size);
1453
 
1454
  DIV_MULT (order) = inv;
1455
  DIV_SHIFT (order) = e;
1456
}
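
/* Illustrative sketch, not part of the original file: a self-check of
   the computed pairs, assuming init_ggc has already filled
   inverse_table.  Dividing OBJECT_SIZE (order) by itself via the
   multiply-and-shift must yield exactly 1 for every order.  The helper
   below is compiled out and exists only as an example.  */
#if 0
static void
verify_inverse_table (void)
{
  unsigned order;
  for (order = 0; order < NUM_ORDERS; ++order)
    gcc_assert (OFFSET_TO_BIT (OBJECT_SIZE (order), order) == 1);
}
#endif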
1457
 
1458
/* Initialize the ggc-mmap allocator.  */
1459
void
1460
init_ggc (void)
1461
{
1462
  unsigned order;
1463
 
1464
  G.pagesize = getpagesize();
1465
  G.lg_pagesize = exact_log2 (G.pagesize);
1466
 
1467
#ifdef HAVE_MMAP_DEV_ZERO
1468
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1469
  if (G.dev_zero_fd == -1)
1470
    internal_error ("open /dev/zero: %m");
1471
#endif
1472
 
1473
#if 0
1474
  G.debug_file = fopen ("ggc-mmap.debug", "w");
1475
#else
1476
  G.debug_file = stdout;
1477
#endif
1478
 
1479
#ifdef USING_MMAP
1480
  /* StunOS has an amazing off-by-one error for the first mmap allocation
1481
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1482
     believe, is an unaligned page allocation, which would cause us to
1483
     hork badly if we tried to use it.  */
1484
  {
1485
    char *p = alloc_anon (NULL, G.pagesize);
1486
    struct page_entry *e;
1487
    if ((size_t)p & (G.pagesize - 1))
1488
      {
1489
        /* How losing.  Discard this one and try another.  If we still
1490
           can't get something useful, give up.  */
1491
 
1492
        p = alloc_anon (NULL, G.pagesize);
1493
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
1494
      }
1495
 
1496
    /* We have a good page, might as well hold onto it...  */
1497
    e = XCNEW (struct page_entry);
1498
    e->bytes = G.pagesize;
1499
    e->page = p;
1500
    e->next = G.free_pages;
1501
    G.free_pages = e;
1502
  }
1503
#endif
1504
 
1505
  /* Initialize the object size table.  */
1506
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1507
    object_size_table[order] = (size_t) 1 << order;
1508
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1509
    {
1510
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1511
 
1512
      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1513
         so that we're sure of getting aligned memory.  */
1514
      s = ROUND_UP (s, MAX_ALIGNMENT);
1515
      object_size_table[order] = s;
1516
    }
1517
 
1518
  /* Initialize the objects-per-page and inverse tables.  */
1519
  for (order = 0; order < NUM_ORDERS; ++order)
1520
    {
1521
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1522
      if (objects_per_page_table[order] == 0)
1523
        objects_per_page_table[order] = 1;
1524
      compute_inverse (order);
1525
    }
1526
 
1527
  /* Reset the size_lookup array to put appropriately sized objects in
1528
     the special orders.  All objects bigger than the previous power
1529
     of two, but no greater than the special size, should go in the
1530
     new order.  */
1531
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1532
    {
1533
      int o;
1534
      int i;
1535
 
1536
      i = OBJECT_SIZE (order);
1537
      if (i >= NUM_SIZE_LOOKUP)
1538
        continue;
1539
 
1540
      for (o = size_lookup[i]; o == size_lookup [i]; --i)
1541
        size_lookup[i] = order;
1542
    }
1543
 
1544
  G.depth_in_use = 0;
1545
  G.depth_max = 10;
1546
  G.depth = XNEWVEC (unsigned int, G.depth_max);
1547
 
1548
  G.by_depth_in_use = 0;
1549
  G.by_depth_max = INITIAL_PTE_COUNT;
1550
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1551
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1552
}
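
/* Illustrative note, not part of the original file: the size_lookup
   reset above, traced for a hypothetical extra order whose rounded
   object size is 88.  In the unmodified table size_lookup[88] is 7
   (128-byte objects), so the inner loop rewrites entries 88 down to 65
   to the new order and stops at index 64, whose value is 6.  Requests
   of 65..88 bytes are then served from 88-byte objects rather than
   128-byte ones.  */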
1553
 
1554
/* Start a new GGC zone.  */
1555
 
1556
struct alloc_zone *
1557
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1558
{
1559
  return NULL;
1560
}
1561
 
1562
/* Destroy a GGC zone.  */
1563
void
1564
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1565
{
1566
}
1567
 
1568
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1569
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1570
 
1571
static void
1572
ggc_recalculate_in_use_p (page_entry *p)
1573
{
1574
  unsigned int i;
1575
  size_t num_objects;
1576
 
1577
  /* Because the past-the-end bit in in_use_p is always set, we
1578
     pretend there is one additional object.  */
1579
  num_objects = OBJECTS_IN_PAGE (p) + 1;
1580
 
1581
  /* Reset the free object count.  */
1582
  p->num_free_objects = num_objects;
1583
 
1584
  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1585
  for (i = 0;
1586
       i < CEIL (BITMAP_SIZE (num_objects),
1587
                 sizeof (*p->in_use_p));
1588
       ++i)
1589
    {
1590
      unsigned long j;
1591
 
1592
      /* Something is in use if it is marked, or if it was in use in a
1593
         context further down the context stack.  */
1594
      p->in_use_p[i] |= save_in_use_p (p)[i];
1595
 
1596
      /* Decrement the free object count for every object allocated.  */
1597
      for (j = p->in_use_p[i]; j; j >>= 1)
1598
        p->num_free_objects -= (j & 1);
1599
    }
1600
 
1601
  gcc_assert (p->num_free_objects < num_objects);
1602
}
1603
 
1604
/* Unmark all objects.  */
1605
 
1606
static void
1607
clear_marks (void)
1608
{
1609
  unsigned order;
1610
 
1611
  for (order = 2; order < NUM_ORDERS; order++)
1612
    {
1613
      page_entry *p;
1614
 
1615
      for (p = G.pages[order]; p != NULL; p = p->next)
1616
        {
1617
          size_t num_objects = OBJECTS_IN_PAGE (p);
1618
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1619
 
1620
          /* The data should be page-aligned.  */
1621
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1622
 
1623
          /* Pages that aren't in the topmost context are not collected;
1624
             nevertheless, we need their in-use bit vectors to store GC
1625
             marks.  So, back them up first.  */
1626
          if (p->context_depth < G.context_depth)
1627
            {
1628
              if (! save_in_use_p (p))
1629
                save_in_use_p (p) = xmalloc (bitmap_size);
1630
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1631
            }
1632
 
1633
          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
1635
          p->num_free_objects = num_objects;
1636
          memset (p->in_use_p, 0, bitmap_size);
1637
 
1638
          /* Make sure the one-past-the-end bit is always set.  */
1639
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1640
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1641
        }
1642
    }
1643
}
1644
 
1645
/* Free all empty pages.  Partially empty pages need no attention
1646
   because the `mark' bit doubles as an `unused' bit.  */
1647
 
1648
static void
1649
sweep_pages (void)
1650
{
1651
  unsigned order;
1652
 
1653
  for (order = 2; order < NUM_ORDERS; order++)
1654
    {
1655
      /* The last page-entry to consider, regardless of entries
1656
         placed at the end of the list.  */
1657
      page_entry * const last = G.page_tails[order];
1658
 
1659
      size_t num_objects;
1660
      size_t live_objects;
1661
      page_entry *p, *previous;
1662
      int done;
1663
 
1664
      p = G.pages[order];
1665
      if (p == NULL)
1666
        continue;
1667
 
1668
      previous = NULL;
1669
      do
1670
        {
1671
          page_entry *next = p->next;
1672
 
1673
          /* Loop until all entries have been examined.  */
1674
          done = (p == last);
1675
 
1676
          num_objects = OBJECTS_IN_PAGE (p);
1677
 
1678
          /* Add all live objects on this page to the count of
1679
             allocated memory.  */
1680
          live_objects = num_objects - p->num_free_objects;
1681
 
1682
          G.allocated += OBJECT_SIZE (order) * live_objects;
1683
 
1684
          /* Only objects on pages in the topmost context should get
1685
             collected.  */
1686
          if (p->context_depth < G.context_depth)
1687
            ;
1688
 
1689
          /* Remove the page if it's empty.  */
1690
          else if (live_objects == 0)
1691
            {
1692
              /* If P was the first page in the list, then NEXT
1693
                 becomes the new first page in the list, otherwise
1694
                 splice P out of the forward pointers.  */
1695
              if (! previous)
1696
                G.pages[order] = next;
1697
              else
1698
                previous->next = next;
1699
 
1700
              /* Splice P out of the back pointers too.  */
1701
              if (next)
1702
                next->prev = previous;
1703
 
1704
              /* Are we removing the last element?  */
1705
              if (p == G.page_tails[order])
1706
                G.page_tails[order] = previous;
1707
              free_page (p);
1708
              p = previous;
1709
            }
1710
 
1711
          /* If the page is full, move it to the end.  */
1712
          else if (p->num_free_objects == 0)
1713
            {
1714
              /* Don't move it if it's already at the end.  */
1715
              if (p != G.page_tails[order])
1716
                {
1717
                  /* Move p to the end of the list.  */
1718
                  p->next = NULL;
1719
                  p->prev = G.page_tails[order];
1720
                  G.page_tails[order]->next = p;
1721
 
1722
                  /* Update the tail pointer...  */
1723
                  G.page_tails[order] = p;
1724
 
1725
                  /* ... and the head pointer, if necessary.  */
1726
                  if (! previous)
1727
                    G.pages[order] = next;
1728
                  else
1729
                    previous->next = next;
1730
 
1731
                  /* And update the backpointer in NEXT if necessary.  */
1732
                  if (next)
1733
                    next->prev = previous;
1734
 
1735
                  p = previous;
1736
                }
1737
            }
1738
 
1739
          /* If we've fallen through to here, it's a page in the
1740
             topmost context that is neither full nor empty.  Such a
1741
             page must precede pages at lesser context depth in the
1742
             list, so move it to the head.  */
1743
          else if (p != G.pages[order])
1744
            {
1745
              previous->next = p->next;
1746
 
1747
              /* Update the backchain in the next node if it exists.  */
1748
              if (p->next)
1749
                p->next->prev = previous;
1750
 
1751
              /* Move P to the head of the list.  */
1752
              p->next = G.pages[order];
1753
              p->prev = NULL;
1754
              G.pages[order]->prev = p;
1755
 
1756
              /* Update the head pointer.  */
1757
              G.pages[order] = p;
1758
 
1759
              /* Are we moving the last element?  */
1760
              if (G.page_tails[order] == p)
1761
                G.page_tails[order] = previous;
1762
              p = previous;
1763
            }
1764
 
1765
          previous = p;
1766
          p = next;
1767
        }
1768
      while (! done);
1769
 
1770
      /* Now, restore the in_use_p vectors for any pages from contexts
1771
         other than the current one.  */
1772
      for (p = G.pages[order]; p; p = p->next)
1773
        if (p->context_depth != G.context_depth)
1774
          ggc_recalculate_in_use_p (p);
1775
    }
1776
}
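
/* Illustrative sketch (not from the original ggc-page.c): the "remove the
   page if it's empty" branch above is an ordinary doubly-linked-list splice
   that also maintains separate head and tail pointers.  The same logic on a
   hypothetical node type (demo_node, demo_unlink):  */

struct demo_node
{
  struct demo_node *next;
  struct demo_node *prev;
};

static void
demo_unlink (struct demo_node **head, struct demo_node **tail,
             struct demo_node *node, struct demo_node *previous)
{
  /* Splice NODE out of the forward pointers.  PREVIOUS is assumed to be
     NODE's predecessor, or null when NODE is the head.  */
  if (! previous)
    *head = node->next;
  else
    previous->next = node->next;

  /* Splice NODE out of the back pointers too.  */
  if (node->next)
    node->next->prev = previous;

  /* If NODE was the last element, the tail moves back to PREVIOUS.  */
  if (*tail == node)
    *tail = previous;
}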
1777
 
1778
#ifdef ENABLE_GC_CHECKING
1779
/* Clobber all free objects.  */
1780
 
1781
static void
1782
poison_pages (void)
1783
{
1784
  unsigned order;
1785
 
1786
  for (order = 2; order < NUM_ORDERS; order++)
1787
    {
1788
      size_t size = OBJECT_SIZE (order);
1789
      page_entry *p;
1790
 
1791
      for (p = G.pages[order]; p != NULL; p = p->next)
1792
        {
1793
          size_t num_objects;
1794
          size_t i;
1795
 
1796
          if (p->context_depth != G.context_depth)
1797
            /* Since we don't do any collection for pages in pushed
1798
               contexts, there's no need to do any poisoning.  And
1799
               besides, the IN_USE_P array isn't valid until we pop
1800
               contexts.  */
1801
            continue;
1802
 
1803
          num_objects = OBJECTS_IN_PAGE (p);
1804
          for (i = 0; i < num_objects; i++)
1805
            {
1806
              size_t word, bit;
1807
              word = i / HOST_BITS_PER_LONG;
1808
              bit = i % HOST_BITS_PER_LONG;
1809
              if (((p->in_use_p[word] >> bit) & 1) == 0)
1810
                {
1811
                  char *object = p->page + i * size;
1812
 
1813
                  /* Keep poison-by-write when we expect to use Valgrind,
1814
                     so the exact same memory semantics is kept, in case
1815
                     there are memory errors.  We override this request
1816
                     below.  */
1817
                  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1818
                  memset (object, 0xa5, size);
1819
 
1820
                  /* Drop the handle to avoid handle leak.  */
1821
                  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1822
                }
1823
            }
1824
        }
1825
    }
1826
}
1827
#else
1828
#define poison_pages()
1829
#endif
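
/* Illustrative sketch (not from the original ggc-page.c): poison_pages
   overwrites every free object with the byte 0xa5, so stray reads of freed
   storage are easy to spot.  A hypothetical checker that verifies a region
   still carries the poison pattern could look like this:  */

#include <stddef.h>

static int
demo_region_is_poisoned (const unsigned char *object, size_t size)
{
  size_t i;

  for (i = 0; i < size; i++)
    if (object[i] != 0xa5)
      return 0;   /* Someone wrote to freed memory.  */

  return 1;
}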
1830
 
1831
#ifdef ENABLE_GC_ALWAYS_COLLECT
1832
/* Validate that the reportedly free objects actually are.  */
1833
 
1834
static void
1835
validate_free_objects (void)
1836
{
1837
  struct free_object *f, *next, *still_free = NULL;
1838
 
1839
  for (f = G.free_object_list; f ; f = next)
1840
    {
1841
      page_entry *pe = lookup_page_table_entry (f->object);
1842
      size_t bit, word;
1843
 
1844
      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1845
      word = bit / HOST_BITS_PER_LONG;
1846
      bit = bit % HOST_BITS_PER_LONG;
1847
      next = f->next;
1848
 
1849
      /* Make certain it isn't visible from any root.  Notice that we
1850
         do this check before sweep_pages merges save_in_use_p.  */
1851
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1852
 
1853
      /* If the object comes from an outer context, then retain the
1854
         free_object entry, so that we can verify that the address
1855
         isn't live on the stack in some outer context.  */
1856
      if (pe->context_depth != G.context_depth)
1857
        {
1858
          f->next = still_free;
1859
          still_free = f;
1860
        }
1861
      else
1862
        free (f);
1863
    }
1864
 
1865
  G.free_object_list = still_free;
1866
}
1867
#else
1868
#define validate_free_objects()
1869
#endif
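
/* Illustrative sketch (not from the original ggc-page.c):
   validate_free_objects turns a byte offset within a page into an object
   index and then into a (word, bit) pair in the in-use bitmap.  That
   arithmetic written out with hypothetical names:  */

#include <limits.h>
#include <stddef.h>

static void
demo_offset_to_word_bit (size_t offset, size_t object_size,
                         size_t *word, size_t *bit)
{
  const size_t bits_per_long = CHAR_BIT * sizeof (unsigned long);
  size_t index = offset / object_size;   /* Which object on the page.  */

  *word = index / bits_per_long;         /* Which bitmap word ...  */
  *bit = index % bits_per_long;          /* ... and which bit inside it.  */
}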
1870
 
1871
/* Top level mark-and-sweep routine.  */
1872
 
1873
void
1874
ggc_collect (void)
1875
{
1876
  /* Avoid frequent unnecessary work by skipping collection if the
1877
     total allocations haven't expanded much since the last
1878
     collection.  */
1879
  float allocated_last_gc =
1880
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1881
 
1882
  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1883
 
1884
  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1885
    return;
1886
 
1887
  timevar_push (TV_GC);
1888
  if (!quiet_flag)
1889
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1890
  if (GGC_DEBUG_LEVEL >= 2)
1891
    fprintf (G.debug_file, "BEGIN COLLECTING\n");
1892
 
1893
  /* Zero the total allocated bytes.  This will be recalculated in the
1894
     sweep phase.  */
1895
  G.allocated = 0;
1896
 
1897
  /* Release the pages we freed the last time we collected, but didn't
1898
     reuse in the interim.  */
1899
  release_pages ();
1900
 
1901
  /* Indicate that we've seen collections at this context depth.  */
1902
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1903
 
1904
  clear_marks ();
1905
  ggc_mark_roots ();
1906
#ifdef GATHER_STATISTICS
1907
  ggc_prune_overhead_list ();
1908
#endif
1909
  poison_pages ();
1910
  validate_free_objects ();
1911
  sweep_pages ();
1912
 
1913
  G.allocated_last_gc = G.allocated;
1914
 
1915
  timevar_pop (TV_GC);
1916
 
1917
  if (!quiet_flag)
1918
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1919
  if (GGC_DEBUG_LEVEL >= 2)
1920
    fprintf (G.debug_file, "END COLLECTING\n");
1921
}
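
/* Illustrative sketch (not from the original ggc-page.c): ggc_collect only
   runs when the heap has grown by at least GGC_MIN_EXPAND percent over the
   larger of the last-GC footprint and GGC_MIN_HEAPSIZE kilobytes.  The same
   decision restated as a stand-alone predicate with hypothetical parameter
   names:  */

#include <stddef.h>

static int
demo_should_collect (size_t allocated, size_t allocated_last_gc,
                     size_t min_heapsize_kb, size_t min_expand_pct,
                     int force)
{
  /* Floor the baseline at the configured minimum heap size.  */
  size_t baseline = allocated_last_gc;
  size_t threshold;

  if (baseline < min_heapsize_kb * 1024)
    baseline = min_heapsize_kb * 1024;

  /* Collect once the heap exceeds the baseline by the minimum-expansion
     percentage, or when a collection is forced.  */
  threshold = baseline + baseline * min_expand_pct / 100;

  return force || allocated >= threshold;
}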
1922
 
1923
/* Print allocation statistics.  */
1924
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1925
                  ? (x) \
1926
                  : ((x) < 1024*1024*10 \
1927
                     ? (x) / 1024 \
1928
                     : (x) / (1024*1024))))
1929
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
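
/* Illustrative usage (not from the original ggc-page.c): SCALE picks a
   bytes/kilobytes/megabytes divisor and STAT_LABEL the matching suffix, so
   a raw byte count prints compactly.  For example, 5242880 bytes prints as
   "5120k", while 3000 bytes keeps its raw value with a blank suffix.  The
   function name demo_print_scaled is hypothetical.  */

static void
demo_print_scaled (unsigned long bytes)
{
  fprintf (stderr, "%lu%c\n", SCALE (bytes), STAT_LABEL (bytes));
}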
1930
 
1931
void
1932
ggc_print_statistics (void)
1933
{
1934
  struct ggc_statistics stats;
1935
  unsigned int i;
1936
  size_t total_overhead = 0;
1937
 
1938
  /* Clear the statistics.  */
1939
  memset (&stats, 0, sizeof (stats));
1940
 
1941
  /* Make sure collection will really occur.  */
1942
  G.allocated_last_gc = 0;
1943
 
1944
  /* Collect and print the statistics common across collectors.  */
1945
  ggc_print_common_statistics (stderr, &stats);
1946
 
1947
  /* Release free pages so that we will not count the bytes allocated
1948
     there as part of the total allocated memory.  */
1949
  release_pages ();
1950
 
1951
  /* Collect some information about the various sizes of
1952
     allocation.  */
1953
  fprintf (stderr,
1954
           "Memory still allocated at the end of the compilation process\n");
1955
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
1956
           "Size", "Allocated", "Used", "Overhead");
1957
  for (i = 0; i < NUM_ORDERS; ++i)
1958
    {
1959
      page_entry *p;
1960
      size_t allocated;
1961
      size_t in_use;
1962
      size_t overhead;
1963
 
1964
      /* Skip empty entries.  */
1965
      if (!G.pages[i])
1966
        continue;
1967
 
1968
      overhead = allocated = in_use = 0;
1969
 
1970
      /* Figure out the total number of bytes allocated for objects of
1971
         this size, and how many of them are actually in use.  Also figure
1972
         out how much memory the page table is using.  */
1973
      for (p = G.pages[i]; p; p = p->next)
1974
        {
1975
          allocated += p->bytes;
1976
          in_use +=
1977
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1978
 
1979
          overhead += (sizeof (page_entry) - sizeof (long)
1980
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1981
        }
1982
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1983
               (unsigned long) OBJECT_SIZE (i),
1984
               SCALE (allocated), STAT_LABEL (allocated),
1985
               SCALE (in_use), STAT_LABEL (in_use),
1986
               SCALE (overhead), STAT_LABEL (overhead));
1987
      total_overhead += overhead;
1988
    }
1989
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1990
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
1991
           SCALE (G.allocated), STAT_LABEL(G.allocated),
1992
           SCALE (total_overhead), STAT_LABEL (total_overhead));
1993
 
1994
#ifdef GATHER_STATISTICS  
1995
  {
1996
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
1997
 
1998
    fprintf (stderr, "Total Overhead:                        %10lld\n",
1999
             G.stats.total_overhead);
2000
    fprintf (stderr, "Total Allocated:                       %10lld\n",
2001
             G.stats.total_allocated);
2002
 
2003
    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
2004
             G.stats.total_overhead_under32);
2005
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
2006
             G.stats.total_allocated_under32);
2007
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
2008
             G.stats.total_overhead_under64);
2009
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
2010
             G.stats.total_allocated_under64);
2011
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
2012
             G.stats.total_overhead_under128);
2013
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
2014
             G.stats.total_allocated_under128);
2015
 
2016
    for (i = 0; i < NUM_ORDERS; i++)
2017
      if (G.stats.total_allocated_per_order[i])
2018
        {
2019
          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
2020
                   (unsigned long) OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
2021
          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
2022
                   (unsigned long) OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
2023
        }
2024
  }
2025
#endif
2026
}
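
/* Illustrative sketch (not from the original ggc-page.c): the per-page
   overhead summed above is the page_entry header (minus its one-long
   in_use_p placeholder) plus the in-use bitmap, which needs one bit per
   object plus the sentinel, rounded up to whole longs.  The bitmap-size
   arithmetic on its own, under the hypothetical name demo_bitmap_bytes:  */

#include <limits.h>
#include <stddef.h>

static size_t
demo_bitmap_bytes (size_t num_objects)
{
  const size_t bits_per_long = CHAR_BIT * sizeof (unsigned long);

  /* One bit per object, one sentinel bit, rounded up to whole longs.  */
  size_t bits = num_objects + 1;
  size_t longs = (bits + bits_per_long - 1) / bits_per_long;

  return longs * sizeof (unsigned long);
}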
2027
 
2028
struct ggc_pch_data
2029
{
2030
  struct ggc_pch_ondisk
2031
  {
2032
    unsigned totals[NUM_ORDERS];
2033
  } d;
2034
  size_t base[NUM_ORDERS];
2035
  size_t written[NUM_ORDERS];
2036
};
2037
 
2038
struct ggc_pch_data *
2039
init_ggc_pch (void)
2040
{
2041
  return XCNEW (struct ggc_pch_data);
2042
}
2043
 
2044
void
2045
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2046
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
2047
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
2048
{
2049
  unsigned order;
2050
 
2051
  if (size < NUM_SIZE_LOOKUP)
2052
    order = size_lookup[size];
2053
  else
2054
    {
2055
      order = 10;
2056
      while (size > OBJECT_SIZE (order))
2057
        order++;
2058
    }
2059
 
2060
  d->d.totals[order]++;
2061
}
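
/* Illustrative sketch (not from the original ggc-page.c):
   ggc_pch_count_object maps a request size to an order the same way the
   allocator does: small sizes go through the size_lookup table, larger ones
   scan upward from order 10 until a large enough order is found.  A
   stand-alone version with hypothetical table parameters:  */

#include <stddef.h>

static unsigned int
demo_size_to_order (size_t size, const unsigned char *lookup,
                    size_t num_lookup, const size_t *object_size)
{
  unsigned int order;

  if (size < num_lookup)
    /* Table-driven answer for the common small sizes.  */
    order = lookup[size];
  else
    {
      /* Otherwise walk up the (sorted) object-size table.  */
      order = 10;
      while (size > object_size[order])
        order++;
    }

  return order;
}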
2062
 
2063
size_t
2064
ggc_pch_total_size (struct ggc_pch_data *d)
2065
{
2066
  size_t a = 0;
2067
  unsigned i;
2068
 
2069
  for (i = 0; i < NUM_ORDERS; i++)
2070
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2071
  return a;
2072
}
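
/* Illustrative sketch (not from the original ggc-page.c): ggc_pch_total_size
   rounds each order's total up to a whole page.  The general form of that
   rounding, under the hypothetical name demo_round_up_to_page:  */

#include <stddef.h>

static size_t
demo_round_up_to_page (size_t bytes, size_t pagesize)
{
  /* Round BYTES up to the next multiple of PAGESIZE.  */
  return ((bytes + pagesize - 1) / pagesize) * pagesize;
}

/* Example: with a 4096-byte page, 4097 bytes rounds up to 8192.  */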
2073
 
2074
void
2075
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2076
{
2077
  size_t a = (size_t) base;
2078
  unsigned i;
2079
 
2080
  for (i = 0; i < NUM_ORDERS; i++)
2081
    {
2082
      d->base[i] = a;
2083
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2084
    }
2085
}
2086
 
2087
 
2088
char *
2089
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2090
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
2091
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
2092
{
2093
  unsigned order;
2094
  char *result;
2095
 
2096
  if (size < NUM_SIZE_LOOKUP)
2097
    order = size_lookup[size];
2098
  else
2099
    {
2100
      order = 10;
2101
      while (size > OBJECT_SIZE (order))
2102
        order++;
2103
    }
2104
 
2105
  result = (char *) d->base[order];
2106
  d->base[order] += OBJECT_SIZE (order);
2107
  return result;
2108
}
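
/* Illustrative sketch (not from the original ggc-page.c): ggc_pch_this_base
   lays the orders out back to back starting at the mapped base address, and
   ggc_pch_alloc_object then hands out addresses by bumping a per-order
   cursor.  The core of that bump-pointer scheme, with hypothetical names:  */

#include <stddef.h>

struct demo_bump_area
{
  size_t cursor;        /* Next free address in this area.  */
  size_t object_size;   /* Fixed size of every object here.  */
};

static char *
demo_bump_alloc (struct demo_bump_area *area)
{
  char *result = (char *) area->cursor;

  /* Advance the cursor by one object; no free list is needed because
     PCH objects are never freed individually.  */
  area->cursor += area->object_size;
  return result;
}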
2109
 
2110
void
2111
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2112
                       FILE *f ATTRIBUTE_UNUSED)
2113
{
2114
  /* Nothing to do.  */
2115
}
2116
 
2117
void
2118
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2119
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2120
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
2121
{
2122
  unsigned order;
2123
  static const char emptyBytes[256];
2124
 
2125
  if (size < NUM_SIZE_LOOKUP)
2126
    order = size_lookup[size];
2127
  else
2128
    {
2129
      order = 10;
2130
      while (size > OBJECT_SIZE (order))
2131
        order++;
2132
    }
2133
 
2134
  if (fwrite (x, size, 1, f) != 1)
2135
    fatal_error ("can't write PCH file: %m");
2136
 
2137
  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2138
     object out to OBJECT_SIZE(order).  This happens for strings.  */
2139
 
2140
  if (size != OBJECT_SIZE (order))
2141
    {
2142
      unsigned padding = OBJECT_SIZE(order) - size;
2143
 
2144
      /* To speed small writes, we use a nulled-out array that's larger
2145
         than most padding requests as the source for our null bytes.  This
2146
         permits us to do the padding with fwrite() rather than fseek(), and
2147
         limits the chance the OS may try to flush any outstanding writes.  */
2148
      if (padding <= sizeof(emptyBytes))
2149
        {
2150
          if (fwrite (emptyBytes, 1, padding, f) != padding)
2151
            fatal_error ("can't write PCH file");
2152
        }
2153
      else
2154
        {
2155
          /* Larger than our buffer?  Just default to fseek.  */
2156
          if (fseek (f, padding, SEEK_CUR) != 0)
2157
            fatal_error ("can't write PCH file");
2158
        }
2159
    }
2160
 
2161
  d->written[order]++;
2162
  if (d->written[order] == d->d.totals[order]
2163
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2164
                                   G.pagesize),
2165
                SEEK_CUR) != 0)
2166
    fatal_error ("can't write PCH file: %m");
2167
}
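
/* Illustrative sketch (not from the original ggc-page.c): small pads are
   written from a static zeroed buffer with fwrite, and only pads larger
   than the buffer fall back to fseek, which keeps the common path a plain
   sequential write.  The padding step in isolation, with the hypothetical
   name demo_pad_stream (returns nonzero on success):  */

#include <stdio.h>

static int
demo_pad_stream (FILE *f, size_t padding)
{
  static const char zeros[256];

  if (padding <= sizeof (zeros))
    /* Cheap case: emit the pad bytes directly.  */
    return fwrite (zeros, 1, padding, f) == padding;

  /* Rare case: skip forward instead of writing a large pad.  */
  return fseek (f, (long) padding, SEEK_CUR) == 0;
}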
2168
 
2169
void
2170
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2171
{
2172
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2173
    fatal_error ("can't write PCH file: %m");
2174
  free (d);
2175
}
2176
 
2177
/* Move the PCH PTE entries just added to the end of by_depth, to the
2178
   front.  */
2179
 
2180
static void
2181
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2182
{
2183
  unsigned i;
2184
 
2185
  /* First, we swap the new entries to the front of the varrays.  */
2186
  page_entry **new_by_depth;
2187
  unsigned long **new_save_in_use;
2188
 
2189
  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2190
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2191
 
2192
  memcpy (&new_by_depth[0],
2193
          &G.by_depth[count_old_page_tables],
2194
          count_new_page_tables * sizeof (void *));
2195
  memcpy (&new_by_depth[count_new_page_tables],
2196
          &G.by_depth[0],
2197
          count_old_page_tables * sizeof (void *));
2198
  memcpy (&new_save_in_use[0],
2199
          &G.save_in_use[count_old_page_tables],
2200
          count_new_page_tables * sizeof (void *));
2201
  memcpy (&new_save_in_use[count_new_page_tables],
2202
          &G.save_in_use[0],
2203
          count_old_page_tables * sizeof (void *));
2204
 
2205
  free (G.by_depth);
2206
  free (G.save_in_use);
2207
 
2208
  G.by_depth = new_by_depth;
2209
  G.save_in_use = new_save_in_use;
2210
 
2211
  /* Now update all the index_by_depth fields.  */
2212
  for (i = G.by_depth_in_use; i > 0; --i)
2213
    {
2214
      page_entry *p = G.by_depth[i-1];
2215
      p->index_by_depth = i-1;
2216
    }
2217
 
2218
  /* And last, we update the depth pointers in G.depth.  The first
2219
     entry is already 0, and context 0 entries always start at index
2220
     0, so there is nothing to update in the first slot.  We need a
2221
     second slot, only if we have old ptes, and if we do, they start
2222
     at index count_new_page_tables.  */
2223
  if (count_old_page_tables)
2224
    push_depth (count_new_page_tables);
2225
}
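
/* Illustrative sketch (not from the original ggc-page.c):
   move_ptes_to_front rotates the newly appended PCH entries to the front of
   by_depth by copying both halves into a fresh array in swapped order.  The
   same rotation on a plain pointer array; CAPACITY is assumed to be at
   least COUNT_OLD + COUNT_NEW, and all names here are hypothetical.  */

#include <stdlib.h>
#include <string.h>

static void **
demo_rotate_new_to_front (void **old, size_t count_old, size_t count_new,
                          size_t capacity)
{
  void **fresh = (void **) malloc (capacity * sizeof (void *));

  if (fresh == NULL)
    return NULL;

  /* New entries (currently at the tail) go first ...  */
  memcpy (&fresh[0], &old[count_old], count_new * sizeof (void *));
  /* ... followed by the old entries.  */
  memcpy (&fresh[count_new], &old[0], count_old * sizeof (void *));

  free (old);
  return fresh;
}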
2226
 
2227
void
2228
ggc_pch_read (FILE *f, void *addr)
2229
{
2230
  struct ggc_pch_ondisk d;
2231
  unsigned i;
2232
  char *offs = addr;
2233
  unsigned long count_old_page_tables;
2234
  unsigned long count_new_page_tables;
2235
 
2236
  count_old_page_tables = G.by_depth_in_use;
2237
 
2238
  /* We've just read in a PCH file.  So, every object that used to be
2239
     allocated is now free.  */
2240
  clear_marks ();
2241
#ifdef ENABLE_GC_CHECKING
2242
  poison_pages ();
2243
#endif
2244
 
2245
  /* No object read from a PCH file should ever be freed.  So, set the
2246
     context depth to 1, and set the depth of all the currently-allocated
2247
     pages to be 1 too.  PCH pages will have depth 0.  */
2248
  gcc_assert (!G.context_depth);
2249
  G.context_depth = 1;
2250
  for (i = 0; i < NUM_ORDERS; i++)
2251
    {
2252
      page_entry *p;
2253
      for (p = G.pages[i]; p != NULL; p = p->next)
2254
        p->context_depth = G.context_depth;
2255
    }
2256
 
2257
  /* Allocate the appropriate page-table entries for the pages read from
2258
     the PCH file.  */
2259
  if (fread (&d, sizeof (d), 1, f) != 1)
2260
    fatal_error ("can't read PCH file: %m");
2261
 
2262
  for (i = 0; i < NUM_ORDERS; i++)
2263
    {
2264
      struct page_entry *entry;
2265
      char *pte;
2266
      size_t bytes;
2267
      size_t num_objs;
2268
      size_t j;
2269
 
2270
      if (d.totals[i] == 0)
2271
        continue;
2272
 
2273
      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2274
      num_objs = bytes / OBJECT_SIZE (i);
2275
      entry = xcalloc (1, (sizeof (struct page_entry)
2276
                           - sizeof (long)
2277
                           + BITMAP_SIZE (num_objs + 1)));
2278
      entry->bytes = bytes;
2279
      entry->page = offs;
2280
      entry->context_depth = 0;
2281
      offs += bytes;
2282
      entry->num_free_objects = 0;
2283
      entry->order = i;
2284
 
2285
      for (j = 0;
2286
           j + HOST_BITS_PER_LONG <= num_objs + 1;
2287
           j += HOST_BITS_PER_LONG)
2288
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2289
      for (; j < num_objs + 1; j++)
2290
        entry->in_use_p[j / HOST_BITS_PER_LONG]
2291
          |= 1L << (j % HOST_BITS_PER_LONG);
2292
 
2293
      for (pte = entry->page;
2294
           pte < entry->page + entry->bytes;
2295
           pte += G.pagesize)
2296
        set_page_table_entry (pte, entry);
2297
 
2298
      if (G.page_tails[i] != NULL)
2299
        G.page_tails[i]->next = entry;
2300
      else
2301
        G.pages[i] = entry;
2302
      G.page_tails[i] = entry;
2303
 
2304
      /* We start off by just adding all the new information to the
2305
         end of the varrays, later, we will move the new information
2306
         to the front of the varrays, as the PCH page tables are at
2307
         context 0.  */
2308
      push_by_depth (entry, 0);
2309
    }
2310
 
2311
  /* Now, we update the various data structures that speed page table
2312
     handling.  */
2313
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2314
 
2315
  move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2316
 
2317
  /* Update the statistics.  */
2318
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
2319
}
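
/* Illustrative sketch (not from the original ggc-page.c): ggc_pch_read marks
   every PCH object as live by filling whole bitmap words with all-ones and
   then setting the remaining bits (including the sentinel) one at a time.
   That fill pattern on its own, with the hypothetical name
   demo_fill_in_use_bitmap:  */

#include <limits.h>
#include <stddef.h>

static void
demo_fill_in_use_bitmap (unsigned long *bitmap, size_t num_objects)
{
  const size_t bits_per_long = CHAR_BIT * sizeof (unsigned long);
  size_t j;

  /* Whole words first: every bit set.  */
  for (j = 0; j + bits_per_long <= num_objects + 1; j += bits_per_long)
    bitmap[j / bits_per_long] = ~0UL;

  /* Then the trailing partial word, bit by bit.  */
  for (; j < num_objects + 1; j++)
    bitmap[j / bits_per_long] |= 1UL << (j % bits_per_long);
}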
