OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.2.2/] [gcc/] [ggc-common.c] - Blame information for rev 274

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/* Simple garbage collection for the GNU compiler.
2
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3
   Free Software Foundation, Inc.
4
 
5
This file is part of GCC.
6
 
7
GCC is free software; you can redistribute it and/or modify it under
8
the terms of the GNU General Public License as published by the Free
9
Software Foundation; either version 3, or (at your option) any later
10
version.
11
 
12
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13
WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
for more details.
16
 
17
You should have received a copy of the GNU General Public License
18
along with GCC; see the file COPYING3.  If not see
19
<http://www.gnu.org/licenses/>.  */
20
 
21
/* Generic garbage collection (GC) functions and data, not specific to
22
   any particular GC implementation.  */
23
 
24
#include "config.h"
25
#include "system.h"
26
#include "coretypes.h"
27
#include "hashtab.h"
28
#include "ggc.h"
29
#include "toplev.h"
30
#include "params.h"
31
#include "hosthooks.h"
32
#include "hosthooks-def.h"
33
 
34
#ifdef HAVE_SYS_RESOURCE_H
35
# include <sys/resource.h>
36
#endif
37
 
38
#ifdef HAVE_MMAP_FILE
39
# include <sys/mman.h>
40
# ifdef HAVE_MINCORE
41
/* This is on Solaris.  */
42
#  include <sys/types.h> 
43
# endif
44
#endif
45
 
46
#ifndef MAP_FAILED
47
# define MAP_FAILED ((void *)-1)
48
#endif
49
 
50
#ifdef ENABLE_VALGRIND_CHECKING
51
# ifdef HAVE_VALGRIND_MEMCHECK_H
52
#  include <valgrind/memcheck.h>
53
# elif defined HAVE_MEMCHECK_H
54
#  include <memcheck.h>
55
# else
56
#  include <valgrind.h>
57
# endif
58
#else
59
/* Avoid #ifdef:s when we can help it.  */
60
#define VALGRIND_DISCARD(x)
61
#endif
62
 
63
/* When set, ggc_collect will do collection.  */
64
bool ggc_force_collect;
65
 
66
/* Statistics about the allocation.  */
67
static ggc_statistics *ggc_stats;
68
 
69
struct traversal_state;
70
 
71
static int ggc_htab_delete (void **, void *);
72
static hashval_t saving_htab_hash (const void *);
73
static int saving_htab_eq (const void *, const void *);
74
static int call_count (void **, void *);
75
static int call_alloc (void **, void *);
76
static int compare_ptr_data (const void *, const void *);
77
static void relocate_ptrs (void *, void *);
78
static void write_pch_globals (const struct ggc_root_tab * const *tab,
79
                               struct traversal_state *state);
80
static double ggc_rlimit_bound (double);
81
 
82
/* Maintain global roots that are preserved during GC.  */
83
 
84
/* Process a slot of an htab by deleting it if it has not been marked.  */
85
 
86
static int
87
ggc_htab_delete (void **slot, void *info)
88
{
89
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
90
 
91
  if (! (*r->marked_p) (*slot))
92
    htab_clear_slot (*r->base, slot);
93
  else
94
    (*r->cb) (*slot);
95
 
96
  return 1;
97
}
98
 
99
/* Iterate through all registered roots and mark each element.  */
100
 
101
void
102
ggc_mark_roots (void)
103
{
104
  const struct ggc_root_tab *const *rt;
105
  const struct ggc_root_tab *rti;
106
  const struct ggc_cache_tab *const *ct;
107
  const struct ggc_cache_tab *cti;
108
  size_t i;
109
 
110
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
111
    for (rti = *rt; rti->base != NULL; rti++)
112
      memset (rti->base, 0, rti->stride);
113
 
114
  for (rt = gt_ggc_rtab; *rt; rt++)
115
    for (rti = *rt; rti->base != NULL; rti++)
116
      for (i = 0; i < rti->nelt; i++)
117
        (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));
118
 
119
  ggc_mark_stringpool ();
120
 
121
  /* Now scan all hash tables that have objects which are to be deleted if
122
     they are not already marked.  */
123
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
124
    for (cti = *ct; cti->base != NULL; cti++)
125
      if (*cti->base)
126
        {
127
          ggc_set_mark (*cti->base);
128
          htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
129
          ggc_set_mark ((*cti->base)->entries);
130
        }
131
}
132
 
133
/* Allocate a block of memory, then clear it.  */
134
void *
135
ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
136
{
137
  void *buf = ggc_alloc_stat (size PASS_MEM_STAT);
138
  memset (buf, 0, size);
139
  return buf;
140
}
141
 
142
/* Resize a block of memory, possibly re-allocating it.  */
143
void *
144
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
145
{
146
  void *r;
147
  size_t old_size;
148
 
149
  if (x == NULL)
150
    return ggc_alloc_stat (size PASS_MEM_STAT);
151
 
152
  old_size = ggc_get_size (x);
153
 
154
  if (size <= old_size)
155
    {
156
      /* Mark the unwanted memory as unaccessible.  We also need to make
157
         the "new" size accessible, since ggc_get_size returns the size of
158
         the pool, not the size of the individually allocated object, the
159
         size which was previously made accessible.  Unfortunately, we
160
         don't know that previously allocated size.  Without that
161
         knowledge we have to lose some initialization-tracking for the
162
         old parts of the object.  An alternative is to mark the whole
163
         old_size as reachable, but that would lose tracking of writes
164
         after the end of the object (by small offsets).  Discard the
165
         handle to avoid handle leak.  */
166
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
167
                                                old_size - size));
168
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
169
      return x;
170
    }
171
 
172
  r = ggc_alloc_stat (size PASS_MEM_STAT);
173
 
174
  /* Since ggc_get_size returns the size of the pool, not the size of the
175
     individually allocated object, we'd access parts of the old object
176
     that were marked invalid with the memcpy below.  We lose a bit of the
177
     initialization-tracking since some of it may be uninitialized.  */
178
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));
179
 
180
  memcpy (r, x, old_size);
181
 
182
  /* The old object is not supposed to be used anymore.  */
183
  ggc_free (x);
184
 
185
  return r;
186
}
187
 
188
/* Like ggc_alloc_cleared, but performs a multiplication.  */
189
void *
190
ggc_calloc (size_t s1, size_t s2)
191
{
192
  return ggc_alloc_cleared (s1 * s2);
193
}
194
 
195
/* Allocation hook for splay_tree_new_ggc.  NL is the (unused, always
   null) allocation-data cookie.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_alloc (sz);
}
202
 
203
void
204
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
205
{
206
  gcc_assert (!nl);
207
}
208
 
209
/* Print statistics that are independent of the collector in use.  */
/* SCALE reduces X to a human-readable magnitude (bytes below 10k,
   kilobytes below 10M, megabytes above); LABEL yields the matching
   unit suffix character (' ', 'k' or 'M') for the same thresholds.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
216
 
217
void
218
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
219
                             ggc_statistics *stats)
220
{
221
  /* Set the pointer so that during collection we will actually gather
222
     the statistics.  */
223
  ggc_stats = stats;
224
 
225
  /* Then do one collection to fill in the statistics.  */
226
  ggc_collect ();
227
 
228
  /* At present, we don't really gather any interesting statistics.  */
229
 
230
  /* Don't gather statistics any more.  */
231
  ggc_stats = NULL;
232
}
233
 
234
/* Functions for saving and restoring GCable memory to disk.  */
235
 
236
static htab_t saving_htab;
237
 
238
/* Bookkeeping for one GC object being written into a PCH image: its
   current address, the callbacks used to walk and reorder it, its size,
   the address it will occupy when the PCH is mapped back in, and its
   static type classification.  */
struct ptr_data
{
  void *obj;                    /* Current address of the object.  */
  void *note_ptr_cookie;        /* Cookie handed back to the callbacks.  */
  gt_note_pointers note_ptr_fn; /* Walks the pointers inside OBJ.  */
  gt_handle_reorder reorder_fn; /* Optional pre-write reorder hook.  */
  size_t size;                  /* Size of OBJ in bytes.  */
  void *new_addr;               /* Address assigned in the PCH image.  */
  enum gt_types_enum type;      /* Type classification of OBJ.  */
};
248
 
249
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
250
 
251
/* Register an object in the hash table.  */
252
 
253
int
254
gt_pch_note_object (void *obj, void *note_ptr_cookie,
255
                    gt_note_pointers note_ptr_fn,
256
                    enum gt_types_enum type)
257
{
258
  struct ptr_data **slot;
259
 
260
  if (obj == NULL || obj == (void *) 1)
261
    return 0;
262
 
263
  slot = (struct ptr_data **)
264
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
265
                              INSERT);
266
  if (*slot != NULL)
267
    {
268
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
269
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
270
      return 0;
271
    }
272
 
273
  *slot = xcalloc (sizeof (struct ptr_data), 1);
274
  (*slot)->obj = obj;
275
  (*slot)->note_ptr_fn = note_ptr_fn;
276
  (*slot)->note_ptr_cookie = note_ptr_cookie;
277
  if (note_ptr_fn == gt_pch_p_S)
278
    (*slot)->size = strlen (obj) + 1;
279
  else
280
    (*slot)->size = ggc_get_size (obj);
281
  (*slot)->type = type;
282
  return 1;
283
}
284
 
285
/* Record REORDER_FN as the reorder callback for OBJ, which must already
   have been registered with gt_pch_note_object under the same
   NOTE_PTR_COOKIE.  (The previous header comment, "Register an object
   in the hash table", was copy-pasted from gt_pch_note_object.)  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  /* NULL and the dummy value are never written out, so nothing to do.  */
  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
301
 
302
/* Hash and equality functions for saving_htab, callbacks for htab_create.  */
303
 
304
static hashval_t
305
saving_htab_hash (const void *p)
306
{
307
  return POINTER_HASH (((struct ptr_data *)p)->obj);
308
}
309
 
310
static int
311
saving_htab_eq (const void *p1, const void *p2)
312
{
313
  return ((struct ptr_data *)p1)->obj == p2;
314
}
315
 
316
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;                /* The PCH file being written.  */
  struct ggc_pch_data *d; /* Collector-private PCH state.  */
  size_t count;           /* Number of objects to be written.  */
  struct ptr_data **ptrs; /* All objects, later sorted by new address.  */
  size_t ptrs_i;          /* Fill index into PTRS.  */
};
326
 
327
/* Callbacks for htab_traverse.  */
328
 
329
static int
330
call_count (void **slot, void *state_p)
331
{
332
  struct ptr_data *d = (struct ptr_data *)*slot;
333
  struct traversal_state *state = (struct traversal_state *)state_p;
334
 
335
  ggc_pch_count_object (state->d, d->obj, d->size,
336
                        d->note_ptr_fn == gt_pch_p_S,
337
                        d->type);
338
  state->count++;
339
  return 1;
340
}
341
 
342
static int
343
call_alloc (void **slot, void *state_p)
344
{
345
  struct ptr_data *d = (struct ptr_data *)*slot;
346
  struct traversal_state *state = (struct traversal_state *)state_p;
347
 
348
  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
349
                                      d->note_ptr_fn == gt_pch_p_S,
350
                                      d->type);
351
  state->ptrs[state->ptrs_i++] = d;
352
  return 1;
353
}
354
 
355
/* Callback for qsort.  */
356
 
357
static int
358
compare_ptr_data (const void *p1_p, const void *p2_p)
359
{
360
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
361
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
362
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
363
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
364
}
365
 
366
/* Callbacks for note_ptr_fn.  */
367
 
368
static void
369
relocate_ptrs (void *ptr_p, void *state_p)
370
{
371
  void **ptr = (void **)ptr_p;
372
  struct traversal_state *state ATTRIBUTE_UNUSED
373
    = (struct traversal_state *)state_p;
374
  struct ptr_data *result;
375
 
376
  if (*ptr == NULL || *ptr == (void *)1)
377
    return;
378
 
379
  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
380
  gcc_assert (result);
381
  *ptr = result->new_addr;
382
}
383
 
384
/* Write out, after relocation, the pointers in TAB.  */
385
static void
386
write_pch_globals (const struct ggc_root_tab * const *tab,
387
                   struct traversal_state *state)
388
{
389
  const struct ggc_root_tab *const *rt;
390
  const struct ggc_root_tab *rti;
391
  size_t i;
392
 
393
  for (rt = tab; *rt; rt++)
394
    for (rti = *rt; rti->base != NULL; rti++)
395
      for (i = 0; i < rti->nelt; i++)
396
        {
397
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
398
          struct ptr_data *new_ptr;
399
          if (ptr == NULL || ptr == (void *)1)
400
            {
401
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
402
                  != 1)
403
                fatal_error ("can't write PCH file: %m");
404
            }
405
          else
406
            {
407
              new_ptr = htab_find_with_hash (saving_htab, ptr,
408
                                             POINTER_HASH (ptr));
409
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
410
                  != 1)
411
                fatal_error ("can't write PCH file: %m");
412
            }
413
        }
414
}
415
 
416
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;        /* File offset of the mmap'd area.  */
  size_t size;          /* Size of the mmap'd area in bytes.  */
  void *preferred_base; /* Address the data was laid out for.  */
};
424
 
425
/* Write out the state of the compiler to F.  */
426
 
427
void
428
gt_pch_save (FILE *f)
429
{
430
  const struct ggc_root_tab *const *rt;
431
  const struct ggc_root_tab *rti;
432
  size_t i;
433
  struct traversal_state state;
434
  char *this_object = NULL;
435
  size_t this_object_size = 0;
436
  struct mmap_info mmi;
437
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();
438
 
439
  gt_pch_save_stringpool ();
440
 
441
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);
442
 
443
  for (rt = gt_ggc_rtab; *rt; rt++)
444
    for (rti = *rt; rti->base != NULL; rti++)
445
      for (i = 0; i < rti->nelt; i++)
446
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
447
 
448
  for (rt = gt_pch_cache_rtab; *rt; rt++)
449
    for (rti = *rt; rti->base != NULL; rti++)
450
      for (i = 0; i < rti->nelt; i++)
451
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
452
 
453
  /* Prepare the objects for writing, determine addresses and such.  */
454
  state.f = f;
455
  state.d = init_ggc_pch();
456
  state.count = 0;
457
  htab_traverse (saving_htab, call_count, &state);
458
 
459
  mmi.size = ggc_pch_total_size (state.d);
460
 
461
  /* Try to arrange things so that no relocation is necessary, but
462
     don't try very hard.  On most platforms, this will always work,
463
     and on the rest it's a lot of work to do better.
464
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
465
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
466
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
467
 
468
  ggc_pch_this_base (state.d, mmi.preferred_base);
469
 
470
  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
471
  state.ptrs_i = 0;
472
  htab_traverse (saving_htab, call_alloc, &state);
473
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
474
 
475
  /* Write out all the scalar variables.  */
476
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
477
    for (rti = *rt; rti->base != NULL; rti++)
478
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
479
        fatal_error ("can't write PCH file: %m");
480
 
481
  /* Write out all the global pointers, after translation.  */
482
  write_pch_globals (gt_ggc_rtab, &state);
483
  write_pch_globals (gt_pch_cache_rtab, &state);
484
 
485
  /* Pad the PCH file so that the mmapped area starts on an allocation
486
     granularity (usually page) boundary.  */
487
  {
488
    long o;
489
    o = ftell (state.f) + sizeof (mmi);
490
    if (o == -1)
491
      fatal_error ("can't get position in PCH file: %m");
492
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
493
    if (mmi.offset == mmap_offset_alignment)
494
      mmi.offset = 0;
495
    mmi.offset += o;
496
  }
497
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
498
    fatal_error ("can't write PCH file: %m");
499
  if (mmi.offset != 0
500
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
501
    fatal_error ("can't write padding to PCH file: %m");
502
 
503
  ggc_pch_prepare_write (state.d, state.f);
504
 
505
  /* Actually write out the objects.  */
506
  for (i = 0; i < state.count; i++)
507
    {
508
      if (this_object_size < state.ptrs[i]->size)
509
        {
510
          this_object_size = state.ptrs[i]->size;
511
          this_object = xrealloc (this_object, this_object_size);
512
        }
513
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
514
      if (state.ptrs[i]->reorder_fn != NULL)
515
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
516
                                   state.ptrs[i]->note_ptr_cookie,
517
                                   relocate_ptrs, &state);
518
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
519
                                  state.ptrs[i]->note_ptr_cookie,
520
                                  relocate_ptrs, &state);
521
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
522
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
523
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
524
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
525
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
526
    }
527
  ggc_pch_finish (state.d, state.f);
528
  gt_pch_fixup_stringpool ();
529
 
530
  free (state.ptrs);
531
  htab_delete (saving_htab);
532
}
533
 
534
/* Read the state of the compiler back in from F.  Layout must mirror
   exactly what gt_pch_save wrote: scalars, then global pointers, then
   the mmap_info header, then (after padding) the object data.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  /* The mmap_info header written by gt_pch_save says where and how big
     the object data is.  */
  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  /* Ask the host to map the file data at its preferred base.  Per the
     hook's contract (see default_gt_pch_use_address): negative means
     the data cannot be placed at that address, zero means memory was
     allocated there but must still be filled by reading, positive
     means the file data is already mapped in place.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  /* Already mapped: just advance the stream past the data.  */
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  /* Let the collector rebuild its internal structures over the data.  */
  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
593
 
594
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
595
   Select no address whatsoever, and let gt_pch_save choose what it will with
596
   malloc, presumably.  */
597
 
598
void *
599
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
600
                            int fd ATTRIBUTE_UNUSED)
601
{
602
  return NULL;
603
}
604
 
605
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not
   present.  Allocate SIZE bytes with malloc.  Return 0 if the address
   we got is the same as BASE, indicating that the memory has been
   allocated but still needs to be read in from the file.  Return -1 if
   the address differs, so relocation of the PCH file would be
   required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  /* (addr == base) is 1 on a hit and 0 on a miss, giving 0 or -1.  */
  return (addr == base) - 1;
}
618
 
619
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY (the old
   comment misnamed it HOST_HOOKS_GT_PCH_GET_ADDRESS).  Return the
   alignment required for allocating virtual memory.  Usually this is
   the same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
628
 
629
#if HAVE_MMAP_FILE
630
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
631
   We temporarily allocate SIZE bytes, and let the kernel place the data
632
   wherever it will.  If it worked, that's our spot, if not we're likely
633
   to be in trouble.  */
634
 
635
void *
636
mmap_gt_pch_get_address (size_t size, int fd)
637
{
638
  void *ret;
639
 
640
  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
641
  if (ret == (void *) MAP_FAILED)
642
    ret = NULL;
643
  else
644
    munmap (ret, size);
645
 
646
  return ret;
647
}
648
 
649
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is
   present.  Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if the data
   really ended up at BASE, -1 if it could not be placed there.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *got;

  /* A SIZE of zero means no PCH will be loaded at all; the hook is only
     being given a chance to free any static space reserved at link
     time.  */
  if (size == 0)
    return -1;

  got = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
              fd, offset);

  return got == base ? 1 : -1;
}
673
#endif /* HAVE_MMAP_FILE */
674
 
675
/* Clamp LIMIT down to whatever the process resource limits allow, and
   return the (possibly reduced) bound.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  Darwin's horribly bogus default
     RLIMIT_DATA of 6144Kb appears to be ignored in practice, so any
     limit below 8Mb is not treated as effective — if it were, GCC
     wouldn't even start up.  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
707
 
708
/* Heuristic to set a default for GGC_MIN_EXPAND: a percentage equal to
   30% + 70% * (RAM/1GB), i.e. a lower bound of 30% and an upper bound
   of 100% (when RAM >= 1GB).  */
int
ggc_min_expand_heuristic (void)
{
  double expand = physmem_total();

  /* Respect any resource-limit caps.  */
  expand = ggc_rlimit_bound (expand);

  expand /= 1024*1024*1024;   /* Fraction of a gigabyte...  */
  expand *= 70;               /* ...scaled to at most 70%...  */
  expand = MIN (expand, 70);
  expand += 30;               /* ...on top of the 30% floor.  */

  return expand;
}
726
 
727
/* Heuristic to set a default for GGC_MIN_HEAPSIZE: RAM/8 in Kbytes,
   clamped to the range 4Mb..128Mb and kept safely under the data and
   RSS resource limits.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  /* Convert both figures to Kbytes.  */
  phys_kbytes /= 1024;
  limit_kbytes /= 1024;

  /* The RAM/8 heuristic.  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 16Mb of the limit.  If GCC does hit the
     data limit, compilation will fail, so this tries to be
     conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - 16 * 1024);
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  /* Clamp to the 4Mb..128Mb range.  */
  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
765
 
766
/* Install the heuristic defaults for the GC tuning parameters — unless
   this is a GC-checking build, which wants deterministic collection
   behavior instead.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}
774
 
775
#ifdef GATHER_STATISTICS
776
 
777
/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;     /* Source file of the allocation site.  */
  int line;             /* Line number of the site.  */
  const char *function; /* Enclosing function name.  */
  int times;            /* Number of allocations made here.  */
  size_t allocated;     /* Total bytes requested from here.  */
  size_t overhead;      /* Total allocator overhead bytes.  */
  size_t freed;         /* Bytes explicitly freed again.  */
  size_t collected;     /* Bytes reclaimed by the collector.  */
};

/* Hashtable used for statistics.  Created lazily by loc_descriptor.  */
static htab_t loc_hash;
792
 
793
/* Hash table helpers functions.  */
794
static hashval_t
795
hash_descriptor (const void *p)
796
{
797
  const struct loc_descriptor *d = p;
798
 
799
  return htab_hash_pointer (d->function) | d->line;
800
}
801
 
802
static int
803
eq_descriptor (const void *p1, const void *p2)
804
{
805
  const struct loc_descriptor *d = p1;
806
  const struct loc_descriptor *d2 = p2;
807
 
808
  return (d->file == d2->file && d->line == d2->line
809
          && d->function == d2->function);
810
}
811
 
812
/* Hashtable converting address of allocated field to loc descriptor.  */
static htab_t ptr_hash;

/* One live tracked allocation: its address, the call site that made it,
   and its total size including overhead.  */
struct ptr_hash_entry
{
  void *ptr;                  /* Address of the allocated object.  */
  struct loc_descriptor *loc; /* Call site responsible for it.  */
  size_t size;                /* Bytes allocated plus overhead.  */
};
820
 
821
/* Hash table helpers functions.  */
822
static hashval_t
823
hash_ptr (const void *p)
824
{
825
  const struct ptr_hash_entry *d = p;
826
 
827
  return htab_hash_pointer (d->ptr);
828
}
829
 
830
static int
831
eq_ptr (const void *p1, const void *p2)
832
{
833
  const struct ptr_hash_entry *p = p1;
834
 
835
  return (p->ptr == p2);
836
}
837
 
838
/* Return descriptor for given call site, create new one if needed.  */
839
static struct loc_descriptor *
840
loc_descriptor (const char *name, int line, const char *function)
841
{
842
  struct loc_descriptor loc;
843
  struct loc_descriptor **slot;
844
 
845
  loc.file = name;
846
  loc.line = line;
847
  loc.function = function;
848
  if (!loc_hash)
849
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);
850
 
851
  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1);
852
  if (*slot)
853
    return *slot;
854
  *slot = xcalloc (sizeof (**slot), 1);
855
  (*slot)->file = name;
856
  (*slot)->line = line;
857
  (*slot)->function = function;
858
  return *slot;
859
}
860
 
861
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
862
void
863
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
864
                     const char *name, int line, const char *function)
865
{
866
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
867
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
868
  PTR *slot;
869
 
870
  p->ptr = ptr;
871
  p->loc = loc;
872
  p->size = allocated + overhead;
873
  if (!ptr_hash)
874
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
875
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
876
  gcc_assert (!*slot);
877
  *slot = p;
878
 
879
  loc->times++;
880
  loc->allocated+=allocated;
881
  loc->overhead+=overhead;
882
}
883
 
884
/* Helper function for prune_overhead_list.  See if SLOT is still marked and
885
   remove it from hashtable if it is not.  */
886
static int
887
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
888
{
889
  struct ptr_hash_entry *p = *slot;
890
  if (!ggc_marked_p (p->ptr))
891
    {
892
      p->loc->collected += p->size;
893
      htab_clear_slot (ptr_hash, slot);
894
      free (p);
895
    }
896
  return 1;
897
}
898
 
899
/* After live values has been marked, walk all recorded pointers and see if
900
   they are still live.  */
901
void
902
ggc_prune_overhead_list (void)
903
{
904
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
905
}
906
 
907
/* Notice that the pointer has been freed.  */
908
void
909
ggc_free_overhead (void *ptr)
910
{
911
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
912
                                        NO_INSERT);
913
  struct ptr_hash_entry *p = *slot;
914
  p->loc->freed += p->size;
915
  htab_clear_slot (ptr_hash, slot);
916
  free (p);
917
}
918
 
919
/* Helper for qsort; sort descriptors by amount of memory consumed.  */
920
static int
921
cmp_statistic (const void *loc1, const void *loc2)
922
{
923
  struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1;
924
  struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2;
925
  return ((l1->allocated + l1->overhead - l1->freed) -
926
          (l2->allocated + l2->overhead - l2->freed));
927
}
928
 
929
/* Collect array of the descriptors from hashtable.  */
struct loc_descriptor **loc_array;

/* htab_traverse callback: append the descriptor in SLOT to loc_array,
   using the int counter at B as the fill index.  */
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *) b;

  loc_array[(*n)++] = (struct loc_descriptor *) *slot;
  return 1;
}
939
 
940
/* Dump per-site memory statistics.  */
#endif
void
dump_ggc_loc_statistics (void)
{
#ifdef GATHER_STATISTICS
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  /* Force one collection so the "Garbage" numbers are current.  */
  ggc_force_collect = true;
  ggc_collect ();

  /* If nothing was ever recorded, loc_hash was never created; there is
     nothing to print (and dereferencing it would crash).  */
  if (!loc_hash)
    return;

  loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s       %10s       %10s       %10s       %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array), cmp_statistic);

  /* First pass: totals used as denominators for the percentages.  */
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }

  /* Percentage of PART in TOTAL, avoiding NaN output when a total is
     zero.  */
#define PERCENT(part, total) ((total) ? (part) * 100.0 / (total) : 0.0)

  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;

          /* Shorten the path to everything after the last "gcc/".  */
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          /* snprintf, not sprintf: file and function names are of
             unbounded length.  The column is truncated to 48 chars
             below in any case.  */
          snprintf (s, sizeof (s), "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
                   (long)d->collected,
                   PERCENT (d->collected, collected),
                   (long)d->freed,
                   PERCENT (d->freed, freed),
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   PERCENT (d->allocated + d->overhead - d->freed - d->collected,
                            allocated + overhead - freed - collected),
                   (long)d->overhead,
                   PERCENT (d->overhead, overhead),
                   (long)d->times);
        }
    }
#undef PERCENT
  fprintf (stderr, "%-48s %10ld       %10ld       %10ld       %10ld       %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s       %10s       %10s       %10s       %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");

  /* The scratch array was leaked by the previous version.  */
  free (loc_array);
  loc_array = NULL;
#endif
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.