openrisc/trunk/gnu-src/gcc-4.5.1/gcc/lto-streamer.c (rev 533)

/* Miscellaneous utilities for GIMPLE streaming.  Things that are used
   in both input and output are here.

   Copyright 2009, 2010 Free Software Foundation, Inc.
   Contributed by Doug Kwan <dougkwan@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "flags.h"
#include "tree.h"
#include "gimple.h"
#include "tree-flow.h"
#include "diagnostic.h"
#include "bitmap.h"
#include "vec.h"
#include "lto-streamer.h"

/* Statistics gathered during LTO, WPA and LTRANS.  */
struct lto_stats_d lto_stats;

/* LTO uses bitmaps with different lifetimes.  So use a separate
   obstack for all LTO bitmaps.  */
static bitmap_obstack lto_obstack;
static bool lto_obstack_initialized;


/* Return a string representing LTO tag TAG.  */

const char *
lto_tag_name (enum LTO_tags tag)
{
  if (lto_tag_is_tree_code_p (tag))
    {
      /* For tags representing tree nodes, return the name of the
         associated tree code.  */
      return tree_code_name[lto_tag_to_tree_code (tag)];
    }

  if (lto_tag_is_gimple_code_p (tag))
    {
      /* For tags representing gimple statements, return the name of
         the associated gimple code.  */
      return gimple_code_name[lto_tag_to_gimple_code (tag)];
    }

  switch (tag)
    {
    case LTO_null:
      return "LTO_null";
    case LTO_bb0:
      return "LTO_bb0";
    case LTO_bb1:
      return "LTO_bb1";
    case LTO_eh_region:
      return "LTO_eh_region";
    case LTO_function:
      return "LTO_function";
    case LTO_eh_table:
      return "LTO_eh_table";
    case LTO_ert_cleanup:
      return "LTO_ert_cleanup";
    case LTO_ert_try:
      return "LTO_ert_try";
    case LTO_ert_allowed_exceptions:
      return "LTO_ert_allowed_exceptions";
    case LTO_ert_must_not_throw:
      return "LTO_ert_must_not_throw";
    case LTO_tree_pickle_reference:
      return "LTO_tree_pickle_reference";
    case LTO_field_decl_ref:
      return "LTO_field_decl_ref";
    case LTO_function_decl_ref:
      return "LTO_function_decl_ref";
    case LTO_label_decl_ref:
      return "LTO_label_decl_ref";
    case LTO_namespace_decl_ref:
      return "LTO_namespace_decl_ref";
    case LTO_result_decl_ref:
      return "LTO_result_decl_ref";
    case LTO_ssa_name_ref:
      return "LTO_ssa_name_ref";
    case LTO_type_decl_ref:
      return "LTO_type_decl_ref";
    case LTO_type_ref:
      return "LTO_type_ref";
    case LTO_global_decl_ref:
      return "LTO_global_decl_ref";
    default:
      return "LTO_UNKNOWN";
    }
}
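
/* Usage sketch (editor's illustration, not part of the original source):
   lto_tag_name is mainly a tracing aid.  A tree tag forwards to
   tree_code_name, a gimple tag to gimple_code_name, and the enumerators
   above return their literal spelling; anything else falls through to
   "LTO_UNKNOWN".  */
#if 0
  fprintf (stderr, "processing tag %s\n", lto_tag_name (LTO_function));
  /* Prints "processing tag LTO_function".  */
#endif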


/* Allocate a bitmap from the LTO obstack.  Initialize the obstack
   if necessary.  */

bitmap
lto_bitmap_alloc (void)
{
  if (!lto_obstack_initialized)
    {
      bitmap_obstack_initialize (&lto_obstack);
      lto_obstack_initialized = true;
    }
  return BITMAP_ALLOC (&lto_obstack);
}

/* Free bitmap B.  */

void
lto_bitmap_free (bitmap b)
{
  BITMAP_FREE (b);
}
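
/* Usage sketch (editor's illustration, not part of the original source):
   scratch bitmaps used during LTO are taken from the shared obstack
   above and released explicitly; bitmap_set_bit and bitmap_bit_p come
   from bitmap.h.  */
#if 0
  bitmap seen = lto_bitmap_alloc ();
  bitmap_set_bit (seen, 42);
  gcc_assert (bitmap_bit_p (seen, 42));
  lto_bitmap_free (seen);
#endif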


/* Get a section name for a particular type or name.  The NAME field
   is only used if SECTION_TYPE is LTO_section_function_body or
   LTO_section_static_initializer.  For all others it is ignored.  The
   caller of this function is responsible for freeing the returned
   name.  */

char *
lto_get_section_name (int section_type, const char *name)
{
  switch (section_type)
    {
    case LTO_section_function_body:
      gcc_assert (name != NULL);
      if (name[0] == '*')
        name++;
      return concat (LTO_SECTION_NAME_PREFIX, name, NULL);

    case LTO_section_static_initializer:
      return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);

    case LTO_section_symtab:
      return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);

    case LTO_section_decls:
      return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);

    case LTO_section_cgraph:
      return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);

    case LTO_section_jump_functions:
      return concat (LTO_SECTION_NAME_PREFIX, ".jmpfuncs", NULL);

    case LTO_section_ipa_pure_const:
      return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);

    case LTO_section_ipa_reference:
      return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);

    case LTO_section_wpa_fixup:
      return concat (LTO_SECTION_NAME_PREFIX, ".wpa_fixup", NULL);

    case LTO_section_opts:
      return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);

    default:
      internal_error ("bytecode stream: unexpected LTO section %s", name);
    }
}
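
/* Usage sketch (editor's illustration, not part of the original source):
   the writer derives the object-file section name for a function body
   from its (possibly '*'-prefixed) assembler name; with the
   LTO_SECTION_NAME_PREFIX defined in lto-streamer.h this produces the
   prefix followed by "foo".  The string is allocated by concat, so the
   caller frees it.  */
#if 0
  char *secname = lto_get_section_name (LTO_section_function_body, "foo");
  /* ... create the output section named SECNAME ...  */
  free (secname);
#endif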


/* Show various memory usage statistics related to LTO.  */

void
print_lto_report (void)
{
  const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
  unsigned i;

  fprintf (stderr, "%s statistics\n", s);
  fprintf (stderr, "[%s] # of input files: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);

  fprintf (stderr, "[%s] # of input cgraph nodes: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
           lto_stats.num_input_cgraph_nodes);

  fprintf (stderr, "[%s] # of function bodies: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
           lto_stats.num_function_bodies);

  fprintf (stderr, "[%s] ", s);
  print_gimple_types_stats ();

  for (i = 0; i < NUM_TREE_CODES; i++)
    if (lto_stats.num_trees[i])
      fprintf (stderr, "[%s] # of '%s' objects read: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               tree_code_name[i], lto_stats.num_trees[i]);

  if (flag_lto)
    {
      fprintf (stderr, "[%s] Compression: "
               HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
               HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
               lto_stats.num_output_il_bytes,
               lto_stats.num_compressed_il_bytes);
      if (lto_stats.num_output_il_bytes > 0)
        {
          const float dividend = (float) lto_stats.num_compressed_il_bytes;
          const float divisor = (float) lto_stats.num_output_il_bytes;
          fprintf (stderr, " (ratio: %f)", dividend / divisor);
        }
      fprintf (stderr, "\n");
    }

  if (flag_wpa)
    {
      fprintf (stderr, "[%s] # of output files: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_output_files);

      fprintf (stderr, "[%s] # of output cgraph nodes: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_output_cgraph_nodes);

      fprintf (stderr, "[%s] # callgraph partitions: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_cgraph_partitions);

      fprintf (stderr, "[%s] Compression: "
               HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
               HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
               lto_stats.num_input_il_bytes,
               lto_stats.num_uncompressed_il_bytes);
      if (lto_stats.num_input_il_bytes > 0)
        {
          const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
          const float divisor = (float) lto_stats.num_input_il_bytes;
          fprintf (stderr, " (ratio: %f)", dividend / divisor);
        }
      fprintf (stderr, "\n");
    }

  for (i = 0; i < LTO_N_SECTION_TYPES; i++)
    fprintf (stderr, "[%s] Size of mmap'd section %s: "
             HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
             lto_section_name[i], lto_stats.section_size[i]);
}


/* Create a new bitpack.  */

struct bitpack_d *
bitpack_create (void)
{
  return XCNEW (struct bitpack_d);
}


/* Free the memory used by bitpack BP.  */

void
bitpack_delete (struct bitpack_d *bp)
{
  VEC_free (bitpack_word_t, heap, bp->values);
  free (bp);
}


/* Return an index to the word in bitpack BP that contains the
   next NBITS.  */

static inline unsigned
bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
{
  unsigned last, ix;

  /* In principle, the next word to use is determined by the
     number of bits already processed in BP.  */
  ix = bp->num_bits / BITS_PER_BITPACK_WORD;

  /* All the encoded bit patterns in BP are contiguous, therefore if
     the next NBITS would straddle two different words, move the
     index to the next word and add the hole of unused bits created
     by this move to the number of encoded bits.  */
  bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
  last = bp->first_unused_bit + nbits - 1;
  if (last >= BITS_PER_BITPACK_WORD)
    {
      ix++;
      bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
      bp->first_unused_bit = 0;
    }

  return ix;
}
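
/* Worked example (editor's addition, assuming BITS_PER_BITPACK_WORD is
   64): if BP->NUM_BITS is 126, IX starts as 126 / 64 = 1 and
   FIRST_UNUSED_BIT reduces to 62.  A request for NBITS = 4 gives
   last = 62 + 4 - 1 = 65 >= 64, so IX is bumped to 2, NUM_BITS grows by
   the 2-bit hole (64 - 62) left at the end of word 1, and
   FIRST_UNUSED_BIT is reset to 0.  */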


/* Pack NBITS of value VAL into bitpack BP.  */

void
bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
{
  unsigned ix;
  bitpack_word_t word;

  /* We cannot encode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word will contain the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  if (ix >= VEC_length (bitpack_word_t, bp->values))
    {
      /* If there is no room left in the last word of the values
         array, add a new word.  We should only ever need to add a
         single word, since no pack operation can use more bits than
         fit in a single word.  */
      gcc_assert (ix < VEC_length (bitpack_word_t, bp->values) + 1);
      VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
    }

  /* Grab the last word to pack VAL into.  */
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* To fit VAL in WORD, we need to shift VAL to the left to
     skip the bottom BP->FIRST_UNUSED_BIT bits.  */
  gcc_assert (BITS_PER_BITPACK_WORD >= bp->first_unused_bit + nbits);
  val <<= bp->first_unused_bit;

  /* Update WORD with VAL.  */
  word |= val;

  /* Update BP.  */
  VEC_replace (bitpack_word_t, bp->values, ix, word);
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;
}


/* Unpack the next NBITS from bitpack BP.  */

bitpack_word_t
bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
{
  bitpack_word_t val, word, mask;
  unsigned ix;

  /* We cannot decode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word contains the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* Compute the mask to get NBITS from WORD.  */
  mask = (nbits == BITS_PER_BITPACK_WORD)
         ? (bitpack_word_t) -1
         : ((bitpack_word_t) 1 << nbits) - 1;

  /* Shift WORD to the right to skip over the bits already decoded
     in WORD.  */
  word >>= bp->first_unused_bit;

  /* Apply the mask to obtain the requested value.  */
  val = word & mask;

  /* Update BP's counters for the next unpack operation.  */
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;

  return val;
}
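
/* Round-trip sketch (editor's illustration, not part of the original
   source): the writer packs a sequence of small fields, emits the words
   in BP->VALUES into the output stream, and the reader rebuilds a
   bitpack from those words and unpacks the fields in the same order and
   with the same widths.  The fragment below collapses both sides into a
   single in-memory bitpack by rewinding the cursor fields by hand; that
   rewind only stands in for what the reader gets when it reconstructs
   the bitpack from the stream.  */
#if 0
  struct bitpack_d *bp = bitpack_create ();

  bp_pack_value (bp, 5, 3);    /* 3-bit field.  */
  bp_pack_value (bp, 1, 1);    /* 1-bit flag.  */
  bp_pack_value (bp, 200, 8);  /* 8-bit field.  */

  /* Rewind to the start of the packed bits.  */
  bp->num_bits = 0;
  bp->first_unused_bit = 0;

  gcc_assert (bp_unpack_value (bp, 3) == 5);
  gcc_assert (bp_unpack_value (bp, 1) == 1);
  gcc_assert (bp_unpack_value (bp, 8) == 200);

  bitpack_delete (bp);
#endif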


/* Check that all the TS_* structures handled by the lto_output_* and
   lto_input_* routines are exactly ALL the structures defined in
   treestruct.def.  */

static void
check_handled_ts_structures (void)
{
  bool handled_p[LAST_TS_ENUM];
  unsigned i;

  memset (&handled_p, 0, sizeof (handled_p));

  /* These are the TS_* structures that are either handled or
     explicitly ignored by the streamer routines.  */
  handled_p[TS_BASE] = true;
  handled_p[TS_COMMON] = true;
  handled_p[TS_INT_CST] = true;
  handled_p[TS_REAL_CST] = true;
  handled_p[TS_FIXED_CST] = true;
  handled_p[TS_VECTOR] = true;
  handled_p[TS_STRING] = true;
  handled_p[TS_COMPLEX] = true;
  handled_p[TS_IDENTIFIER] = true;
  handled_p[TS_DECL_MINIMAL] = true;
  handled_p[TS_DECL_COMMON] = true;
  handled_p[TS_DECL_WRTL] = true;
  handled_p[TS_DECL_NON_COMMON] = true;
  handled_p[TS_DECL_WITH_VIS] = true;
  handled_p[TS_FIELD_DECL] = true;
  handled_p[TS_VAR_DECL] = true;
  handled_p[TS_PARM_DECL] = true;
  handled_p[TS_LABEL_DECL] = true;
  handled_p[TS_RESULT_DECL] = true;
  handled_p[TS_CONST_DECL] = true;
  handled_p[TS_TYPE_DECL] = true;
  handled_p[TS_FUNCTION_DECL] = true;
  handled_p[TS_TYPE] = true;
  handled_p[TS_LIST] = true;
  handled_p[TS_VEC] = true;
  handled_p[TS_EXP] = true;
  handled_p[TS_SSA_NAME] = true;
  handled_p[TS_BLOCK] = true;
  handled_p[TS_BINFO] = true;
  handled_p[TS_STATEMENT_LIST] = true;
  handled_p[TS_CONSTRUCTOR] = true;
  handled_p[TS_OMP_CLAUSE] = true;
  handled_p[TS_OPTIMIZATION] = true;
  handled_p[TS_TARGET_OPTION] = true;

  /* Anything not marked above will trigger the following assertion.
     If this assertion triggers, it means that there is a new TS_*
     structure that should be handled by the streamer.  */
  for (i = 0; i < LAST_TS_ENUM; i++)
    gcc_assert (handled_p[i]);
}


/* Helper for lto_streamer_cache_insert_1.  Add T to CACHE->NODES at
   slot IX.  Add OFFSET to CACHE->OFFSETS at slot IX.  */

static void
lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
                                      int ix, tree t, unsigned offset)
{
  gcc_assert (ix >= 0);

  /* Grow the array of nodes and offsets to accommodate T at IX.  */
  if (ix >= (int) VEC_length (tree, cache->nodes))
    {
      size_t sz = ix + (20 + ix) / 4;
      VEC_safe_grow_cleared (tree, gc, cache->nodes, sz);
      VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
    }

  VEC_replace (tree, cache->nodes, ix, t);
  VEC_replace (unsigned, cache->offsets, ix, offset);
}
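
/* Note (editor's addition): the growth formula above over-allocates by
   roughly 25% plus a small constant, so repeated insertions do not
   force a reallocation for every new slot.  For example, IX = 100
   yields sz = 100 + (20 + 100) / 4 = 130 entries.  */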


/* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
   CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
   slot in the cache.  Otherwise, T is inserted at the position indicated
   in *IX_P.

   If T already existed in CACHE, return true.  Otherwise,
   return false.  */

static bool
lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
                             tree t, int *ix_p, unsigned *offset_p,
                             bool insert_at_next_slot_p)
{
  void **slot;
  struct tree_int_map d_entry, *entry;
  int ix;
  unsigned offset;
  bool existed_p;

  gcc_assert (t);

  d_entry.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
  if (*slot == NULL)
    {
      /* Determine the next slot to use in the cache.  */
      if (insert_at_next_slot_p)
        ix = cache->next_slot++;
      else
        ix = *ix_p;

      entry = XCNEW (struct tree_int_map);
      entry->base.from = t;
      entry->to = (unsigned) ix;
      *slot = entry;

      /* If no offset was given, store the invalid offset -1.  */
      offset = (offset_p) ? *offset_p : (unsigned) -1;

      lto_streamer_cache_add_to_node_array (cache, ix, t, offset);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      entry = (struct tree_int_map *) *slot;
      ix = (int) entry->to;
      offset = VEC_index (unsigned, cache->offsets, ix);

      if (!insert_at_next_slot_p && ix != *ix_p)
        {
          /* If the caller wants to insert T at a specific slot
             location, and ENTRY->TO does not match *IX_P, add T to
             the requested location slot.  This situation arises when
             streaming builtin functions.

             For instance, on the writer side we could have two
             FUNCTION_DECLS T1 and T2 that are represented by the same
             builtin function.  The reader will only instantiate the
             canonical builtin, but since T1 and T2 had been
             originally stored in different cache slots (S1 and S2),
             the reader must be able to find the canonical builtin
             function at slots S1 and S2.  */
          gcc_assert (lto_stream_as_builtin_p (t));
          ix = *ix_p;

          /* Since we are storing a builtin, the offset into the
             stream is not necessary as we will not need to read
             forward in the stream.  */
          lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
        }

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  if (ix_p)
    *ix_p = ix;

  if (offset_p)
    *offset_p = offset;

  return existed_p;
}


/* Insert tree node T in CACHE.  If T already existed in the cache
   return true.  Otherwise, return false.

   If IX_P is non-null, update it with the index into the cache where
   T has been stored.

   *OFFSET_P represents the offset in the stream where T is physically
   written out.  The first time T is added to the cache, *OFFSET_P is
   recorded in the cache together with T.  But if T already existed
   in the cache, *OFFSET_P is updated with the value that was recorded
   the first time T was added to the cache.

   If OFFSET_P is NULL, it is ignored.  */

bool
lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t,
                           int *ix_p, unsigned *offset_p)
{
  return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true);
}


/* Insert tree node T in CACHE at slot IX.  If T already
   existed in the cache return true.  Otherwise, return false.  */

bool
lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache,
                              tree t, int ix)
{
  return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false);
}


/* Return true if tree node T exists in CACHE.  If IX_P is
   not NULL, write to *IX_P the index into the cache where T is stored
   (-1 if T is not found).  */

bool
lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t,
                           int *ix_p)
{
  void **slot;
  struct tree_int_map d_slot;
  bool retval;
  int ix;

  gcc_assert (t);

  d_slot.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT);
  if (slot == NULL)
    {
      retval = false;
      ix = -1;
    }
  else
    {
      retval = true;
      ix = (int) ((struct tree_int_map *) *slot)->to;
    }

  if (ix_p)
    *ix_p = ix;

  return retval;
}


/* Return the tree node at slot IX in CACHE.  */

tree
lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix)
{
  gcc_assert (cache);

  /* If the reader is requesting an index beyond the length of the
     cache, it will need to read ahead.  Return NULL_TREE to indicate
     that.  */
  if ((unsigned) ix >= VEC_length (tree, cache->nodes))
    return NULL_TREE;

  return VEC_index (tree, cache->nodes, (unsigned) ix);
}
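
/* Usage sketch (editor's illustration, not part of the original source;
   CACHE stands for an existing struct lto_streamer_cache_d * and T for
   a tree being streamed): the writer pickles a tree at most once.  The
   first insert returns false and hands back a fresh index; inserting
   the same node again returns true with the same index, so the writer
   can emit a back reference instead of the whole node, and the reader
   resolves that reference with lto_streamer_cache_get.  */
#if 0
  int ix1, ix2;
  bool existed_p;

  existed_p = lto_streamer_cache_insert (cache, t, &ix1, NULL);
  gcc_assert (!existed_p);

  existed_p = lto_streamer_cache_insert (cache, t, &ix2, NULL);
  gcc_assert (existed_p && ix1 == ix2);

  gcc_assert (lto_streamer_cache_lookup (cache, t, NULL));
  gcc_assert (lto_streamer_cache_get (cache, ix1) == t);
#endif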


/* Record NODE in COMMON_NODES if it is not NULL and is not already in
   SEEN_NODES.  */

static void
lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
                        struct pointer_set_t *seen_nodes)
{
  tree node = *nodep;

  if (node == NULL_TREE)
    return;

  if (TYPE_P (node))
    *nodep = node = gimple_register_type (node);

  /* Return if node is already seen.  */
  if (pointer_set_insert (seen_nodes, node))
    return;

  VEC_safe_push (tree, heap, *common_nodes, node);

  if (tree_node_can_be_shared (node))
    {
      if (POINTER_TYPE_P (node)
          || TREE_CODE (node) == COMPLEX_TYPE
          || TREE_CODE (node) == ARRAY_TYPE)
        lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
    }
}


/* Generate a vector of common nodes and make sure they are merged
   properly according to the gimple type table.  */

static VEC(tree,heap) *
lto_get_common_nodes (void)
{
  unsigned i;
  VEC(tree,heap) *common_nodes = NULL;
  struct pointer_set_t *seen_nodes;

  /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
     LTO back-end must agree.  Currently, the only languages that set this
     use the name "main".  */
  if (main_identifier_node)
    {
      const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
      gcc_assert (strcmp (main_name, "main") == 0);
    }
  else
    main_identifier_node = get_identifier ("main");

  gcc_assert (ptrdiff_type_node == integer_type_node);

  /* FIXME lto.  In the C++ front-end, fileptr_type_node is defined as a
     variant copy of ptr_type_node, rather than ptr_type_node itself.  The
     distinction should only be relevant to the front-end, so we always
     use the C definition here in lto1.

     These should be assured in pass_ipa_free_lang_data.  */
  gcc_assert (fileptr_type_node == ptr_type_node);
  gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);

  seen_nodes = pointer_set_create ();

  /* Skip itk_char.  char_type_node is shared with the appropriately
     signed variant.  */
  for (i = itk_signed_char; i < itk_none; i++)
    lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);

  for (i = 0; i < TYPE_KIND_LAST; i++)
    lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);

  for (i = 0; i < TI_MAX; i++)
    lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);

  pointer_set_destroy (seen_nodes);

  return common_nodes;
}


/* Assign an index to tree node T and enter it in the streamer cache
   CACHE.  */

static void
preload_common_node (struct lto_streamer_cache_d *cache, tree t)
{
  gcc_assert (t);

  lto_streamer_cache_insert (cache, t, NULL, NULL);

  /* The FIELD_DECLs of structures should be shared, so that every
     COMPONENT_REF uses the same tree node when referencing a field.
     Pointer equality between FIELD_DECLs is used by the alias
     machinery to compute overlapping memory references (See
     nonoverlapping_component_refs_p).  */
  if (TREE_CODE (t) == RECORD_TYPE)
    {
      tree f;

      for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f))
        preload_common_node (cache, f);
    }
}


/* Create a cache of pickled nodes.  */

struct lto_streamer_cache_d *
lto_streamer_cache_create (void)
{
  struct lto_streamer_cache_d *cache;
  VEC(tree, heap) *common_nodes;
  unsigned i;
  tree node;

  cache = XCNEW (struct lto_streamer_cache_d);

  cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL);

  /* Load all the well-known tree nodes that are always created by
     the compiler on startup.  This prevents writing them out
     unnecessarily.  */
  common_nodes = lto_get_common_nodes ();

  for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++)
    preload_common_node (cache, node);

  VEC_free (tree, heap, common_nodes);

  return cache;
}


/* Delete the streamer cache C.  */

void
lto_streamer_cache_delete (struct lto_streamer_cache_d *c)
{
  if (c == NULL)
    return;

  htab_delete (c->node_map);
  VEC_free (tree, gc, c->nodes);
  VEC_free (unsigned, heap, c->offsets);
  free (c);
}
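
/* Lifecycle sketch (editor's illustration, not part of the original
   source): both the writer and the reader create one cache per stream,
   which arrives pre-populated with the common nodes collected by
   lto_get_common_nodes so those never have to travel through the byte
   stream, and delete it once streaming is finished.  */
#if 0
  struct lto_streamer_cache_d *cache = lto_streamer_cache_create ();
  /* ... stream trees, consulting CACHE for back references ...  */
  lto_streamer_cache_delete (cache);
#endif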


#ifdef LTO_STREAMER_DEBUG
static htab_t tree_htab;

struct tree_hash_entry
{
  tree key;
  intptr_t value;
};

static hashval_t
hash_tree (const void *p)
{
  const struct tree_hash_entry *e = (const struct tree_hash_entry *) p;
  return htab_hash_pointer (e->key);
}

static int
eq_tree (const void *p1, const void *p2)
{
  const struct tree_hash_entry *e1 = (const struct tree_hash_entry *) p1;
  const struct tree_hash_entry *e2 = (const struct tree_hash_entry *) p2;
  return (e1->key == e2->key);
}
#endif

/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* structures handled by the reader and writer
     routines match exactly the structures defined in treestruct.def.
     When a new TS_* structure is added, the streamer should be updated
     to handle it.  */
  check_handled_ts_structures ();

#ifdef LTO_STREAMER_DEBUG
  tree_htab = htab_create (31, hash_tree, eq_tree, NULL);
#endif
}


/* Gate function for all LTO streaming passes.  */

bool
gate_lto_out (void)
{
  return ((flag_generate_lto || in_lto_p)
          /* Don't bother doing anything if the program has errors.  */
          && !(errorcount || sorrycount));
}


#ifdef LTO_STREAMER_DEBUG
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  ent.value = orig_t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, INSERT);
  gcc_assert (!*slot);
  *slot = XNEW (struct tree_hash_entry);
  **slot = ent;
}


/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed.  */

intptr_t
lto_orig_address_get (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  return (slot ? (*slot)->value : 0);
}


/* Clear the mapping of T to its original address.  */

void
lto_orig_address_remove (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  gcc_assert (slot);
  free (*slot);
  htab_clear_slot (tree_htab, (PTR *)slot);
}
#endif
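
/* Usage sketch (editor's illustration, not part of the original source;
   only meaningful when LTO_STREAMER_DEBUG is defined, and
   ORIG_ADDRESS_FROM_STREAM is a placeholder for the writer-side address
   recorded in the stream): while the reader rebuilds a tree T it keeps
   the writer's address for T, so a breakpoint on that value can be set
   in both the reader and the writer session; the mapping is dropped
   once T is complete.  */
#if 0
  lto_orig_address_map (t, orig_address_from_stream);
  /* ... finish reconstructing T; lto_orig_address_get (t) returns the
     writer-side address while debugging ...  */
  lto_orig_address_remove (t);
#endif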


/* Check that MAJOR.MINOR is the correct version number.  */

void
lto_check_version (int major, int minor)
{
  if (major != LTO_major_version || minor != LTO_minor_version)
    fatal_error ("bytecode stream generated with LTO version %d.%d instead "
                 "of the expected %d.%d",
                 major, minor,
                 LTO_major_version, LTO_minor_version);
}
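
/* Usage sketch (editor's illustration, not part of the original source):
   the reader calls this with the two version numbers it decoded from a
   section header; a mismatch aborts via fatal_error, while matching
   numbers simply return.  */
#if 0
  lto_check_version (LTO_major_version, LTO_minor_version);  /* Passes.  */
#endif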
