/* Control flow graph analysis code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains various simple utilities to analyze the CFG.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "vec.h"
#include "vecprim.h"
#include "bitmap.h"
#include "sbitmap.h"
#include "timevar.h"

/* Store the data structures necessary for depth-first search.  */
struct depth_first_search_dsS {
  /* stack for backtracking during the algorithm */
  basic_block *stack;

  /* number of basic blocks on the stack.  That is, positions 0, ..., sp-1
     are occupied.  */
  unsigned int sp;

  /* record of basic blocks already seen by depth-first search */
  sbitmap visited_blocks;
};
typedef struct depth_first_search_dsS *depth_first_search_ds;

static void flow_dfs_compute_reverse_init (depth_first_search_ds);
static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds,
                                             basic_block);
static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds,
                                                     basic_block);
static void flow_dfs_compute_reverse_finish (depth_first_search_ds);
static bool flow_active_insn_p (const_rtx);

/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const_rtx insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
forwarder_block_p (const_basic_block bb)
{
  rtx insn;

  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Return true if we can reach target from src by falling through.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx insn = BB_END (src);
  rtx insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  if (src->next_bb != target)
    return false;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (insn2 && !active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  /* ??? Later we may add code to move jump tables offline.  */
  return next_active_insn (insn) == insn2;
}

/* Return true if we could reach target from src by falling through,
   if the target was made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Mark the back edges in DFS traversal.
   Return nonzero if a loop (natural or otherwise) is present.
   Inspired by Depth_First_Search_PP described in:

     Advanced Compiler Design and Implementation
     Steven Muchnick
     Morgan Kaufmann, 1997

   and heavily borrowed from pre_and_rev_post_order_compute.  */

bool
mark_dfs_back_edges (void)
{
  edge_iterator *stack;
  int *pre;
  int *post;
  int sp;
  int prenum = 1;
  int postnum = 1;
  sbitmap visited;
  bool found = false;

  /* Allocate the preorder and postorder number arrays.  */
  pre = XCNEWVEC (int, last_basic_block);
  post = XCNEWVEC (int, last_basic_block);

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;
      ei_edge (ei)->flags &= ~EDGE_DFS_BACK;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          pre[dest->index] = prenum++;
          if (EDGE_COUNT (dest->succs) > 0)
            {
              /* Since the DEST node has been visited for the first
                 time, check its successors.  */
              stack[sp++] = ei_start (dest->succs);
            }
          else
            post[dest->index] = postnum++;
        }
      else
        {
          if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
              && pre[src->index] >= pre[dest->index]
              && post[dest->index] == 0)
            ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;

          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            post[src->index] = postnum++;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (pre);
  free (post);
  free (stack);
  sbitmap_free (visited);

  return found;
}
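
/* Usage sketch for mark_dfs_back_edges (an editorial illustration, not
   part of the original file; handle_back_edge is a hypothetical
   consumer):

     basic_block bb;
     edge e;
     edge_iterator ei;
     bool has_loop = mark_dfs_back_edges ();

     FOR_EACH_BB (bb)
       FOR_EACH_EDGE (e, ei, bb->succs)
         if (e->flags & EDGE_DFS_BACK)
           handle_back_edge (e);

   The EDGE_DFS_BACK flags are a snapshot; they go stale as soon as the
   CFG is modified.  */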

/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru.  */

void
set_edge_can_fallthru_flag (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          e->flags &= ~EDGE_CAN_FALLTHRU;

          /* A FALLTHRU edge is also a CAN_FALLTHRU edge.  */
          if (e->flags & EDGE_FALLTHRU)
            e->flags |= EDGE_CAN_FALLTHRU;
        }

      /* If the BB ends with an invertible condjump, both of its (two)
         edges are CAN_FALLTHRU edges.  */
      if (EDGE_COUNT (bb->succs) != 2)
        continue;
      if (!any_condjump_p (BB_END (bb)))
        continue;
      if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0))
        continue;
      /* The jump was inverted above only to prove that it can be;
         invert it again to restore the original condition.  */
      invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0);
      EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
      EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
    }
}

/* Find unreachable blocks.  An unreachable block will have the
   BB_REACHABLE bit clear in block->flags; a set bit indicates the
   block is reachable.  */

void
find_unreachable_blocks (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  tos = worklist = XNEWVEC (basic_block, n_basic_blocks);

  /* Clear all the reachability flags.  */

  FOR_EACH_BB (bb)
    bb->flags &= ~BB_REACHABLE;

  /* Add our starting points to the worklist.  Almost always there will
     be only one.  It isn't inconceivable that we might one day directly
     support Fortran alternate entry points.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      *tos++ = e->dest;

      /* Mark the block reachable.  */
      e->dest->flags |= BB_REACHABLE;
    }

  /* Iterate: find everything reachable from what we've already seen.  */

  while (tos != worklist)
    {
      basic_block b = *--tos;

      FOR_EACH_EDGE (e, ei, b->succs)
        {
          basic_block dest = e->dest;

          if (!(dest->flags & BB_REACHABLE))
            {
              *tos++ = dest;
              dest->flags |= BB_REACHABLE;
            }
        }
    }

  free (worklist);
}
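
/* Usage sketch for find_unreachable_blocks (an editorial illustration,
   not part of the original file; note_dead_block is hypothetical):

     basic_block bb;

     find_unreachable_blocks ();
     FOR_EACH_BB (bb)
       if (!(bb->flags & BB_REACHABLE))
         note_dead_block (bb);

   Like the EDGE_DFS_BACK flags, BB_REACHABLE is only valid until the
   next CFG modification.  */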

/* Functions to access an edge list with a vector representation.
   Enough data is kept such that given an index number, the
   pred and succ that edge represents can be determined, or
   given a pred and a succ, its index number can be returned.
   This allows algorithms which consume a lot of memory to
   represent the normally full matrix of edge (pred,succ) with a
   single indexed vector, edge (EDGE_INDEX (pred, succ)), with no
   wasted space in the client code due to sparse flow graphs.  */

/* This function initializes the edge list.  Basically the entire
   flowgraph is processed, and all edges are assigned a number,
   and the data structure is filled in.  */

struct edge_list *
create_edge_list (void)
{
  struct edge_list *elist;
  edge e;
  int num_edges;
  int block_count;
  basic_block bb;
  edge_iterator ei;

  block_count = n_basic_blocks; /* Include the entry and exit blocks.  */

  num_edges = 0;

  /* Determine the number of edges in the flow graph by counting successor
     edges on each basic block.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      num_edges += EDGE_COUNT (bb->succs);
    }

  elist = XNEW (struct edge_list);
  elist->num_blocks = block_count;
  elist->num_edges = num_edges;
  elist->index_to_edge = XNEWVEC (edge, num_edges);

  num_edges = 0;

  /* Follow successors of blocks, and register these edges.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      elist->index_to_edge[num_edges++] = e;

  return elist;
}

/* This function frees the memory associated with an edge list.  */

void
free_edge_list (struct edge_list *elist)
{
  if (elist)
    {
      free (elist->index_to_edge);
      free (elist);
    }
}
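
/* Usage sketch for the edge list API (an editorial illustration, not
   part of the original file; record_edge_index is hypothetical):

     struct edge_list *elist = create_edge_list ();
     int idx = EDGE_INDEX (elist, bb, single_succ (bb));

     if (idx != EDGE_INDEX_NO_EDGE)
       record_edge_index (idx);
     free_edge_list (elist);

   Because the indices are only meaningful while the CFG is unchanged,
   an edge list is typically created, consumed, and freed within a
   single pass.  */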

/* This function provides debug output showing an edge list.  */

DEBUG_FUNCTION void
print_edge_list (FILE *f, struct edge_list *elist)
{
  int x;

  fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
           elist->num_blocks, elist->num_edges);

  for (x = 0; x < elist->num_edges; x++)
    {
      fprintf (f, " %-4d - edge(", x);
      if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
        fprintf (f, "entry,");
      else
        fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);

      if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
        fprintf (f, "exit)\n");
      else
        fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
    }
}

/* This function provides an internal consistency check of an edge list,
   verifying that all edges are present, and that there are no
   extra edges.  */

DEBUG_FUNCTION void
verify_edge_list (FILE *f, struct edge_list *elist)
{
  int pred, succ, index;
  edge e;
  basic_block bb, p, s;
  edge_iterator ei;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          pred = e->src->index;
          succ = e->dest->index;
          index = EDGE_INDEX (elist, e->src, e->dest);
          if (index == EDGE_INDEX_NO_EDGE)
            {
              fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
              continue;
            }

          if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
            fprintf (f, "*p* Pred for index %d should be %d not %d\n",
                     index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
          if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
            fprintf (f, "*p* Succ for index %d should be %d not %d\n",
                     index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
        }
    }

  /* We've verified that all the edges are in the list, now let's make sure
     there are no spurious edges in the list.  */

  FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
      {
        int found_edge = 0;

        FOR_EACH_EDGE (e, ei, p->succs)
          if (e->dest == s)
            {
              found_edge = 1;
              break;
            }

        FOR_EACH_EDGE (e, ei, s->preds)
          if (e->src == p)
            {
              found_edge = 1;
              break;
            }

        if (EDGE_INDEX (elist, p, s)
            == EDGE_INDEX_NO_EDGE && found_edge != 0)
          fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
                   p->index, s->index);
        if (EDGE_INDEX (elist, p, s)
            != EDGE_INDEX_NO_EDGE && found_edge == 0)
          fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
                   p->index, s->index, EDGE_INDEX (elist, p, s));
      }
}

/* Given PRED and SUCC blocks, return the edge which connects the blocks.
   If no such edge exists, return NULL.  */

edge
find_edge (basic_block pred, basic_block succ)
{
  edge e;
  edge_iterator ei;

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      FOR_EACH_EDGE (e, ei, pred->succs)
        if (e->dest == succ)
          return e;
    }
  else
    {
      FOR_EACH_EDGE (e, ei, succ->preds)
        if (e->src == pred)
          return e;
    }

  return NULL;
}

/* This routine will determine what, if any, edge there is between
   a specified predecessor and successor.  It returns the edge's index,
   or EDGE_INDEX_NO_EDGE if there is no such edge.  */

int
find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ)
{
  int x;

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    if (INDEX_EDGE_PRED_BB (edge_list, x) == pred
        && INDEX_EDGE_SUCC_BB (edge_list, x) == succ)
      return x;

  return (EDGE_INDEX_NO_EDGE);
}

/* Dump the list of basic blocks in the bitmap NODES.  */

void
flow_nodes_print (const char *str, const_sbitmap nodes, FILE *file)
{
  unsigned int node = 0;
  sbitmap_iterator sbi;

  if (! nodes)
    return;

  fprintf (file, "%s { ", str);
  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, sbi)
    fprintf (file, "%d ", node);
  fputs ("}\n", file);
}

/* Dump the list of edges in the array EDGE_LIST.  */

void
flow_edge_list_print (const char *str, const edge *edge_list, int num_edges, FILE *file)
{
  int i;

  if (! edge_list)
    return;

  fprintf (file, "%s { ", str);
  for (i = 0; i < num_edges; i++)
    fprintf (file, "%d->%d ", edge_list[i]->src->index,
             edge_list[i]->dest->index);

  fputs ("}\n", file);
}


/* This routine will remove any fake predecessor edges for a basic block.
   When the edge is removed, it is also removed from whatever successor
   list it is in.  */

static void
remove_fake_predecessors (basic_block bb)
{
  edge e;
  edge_iterator ei;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      if ((e->flags & EDGE_FAKE) == EDGE_FAKE)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* This routine will remove all fake edges from the flow graph.  If
   we remove all fake successors, it will automatically remove all
   fake predecessors.  */

void
remove_fake_edges (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
    remove_fake_predecessors (bb);
}

/* This routine will remove all fake edges to the EXIT_BLOCK.  */

void
remove_fake_exit_edges (void)
{
  remove_fake_predecessors (EXIT_BLOCK_PTR);
}


/* This function will add a fake edge from any block which has no
   successors to the exit block.  Some data flow equations require these
   edges to exist.  */

void
add_noreturn_fake_exit_edges (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    if (EDGE_COUNT (bb->succs) == 0)
      make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}

/* This function adds a fake edge from each infinite loop to the
   exit block.  Some optimizations require a path from every node to
   the exit node.

   See also Morgan, Figure 3.10, pp. 82-83.

   The current implementation is ugly, not attempting to minimize the
   number of inserted fake edges.  To reduce the number of fake edges
   to insert, add fake edges from _innermost_ loops containing only
   nodes not reachable from the exit block.  */

void
connect_infinite_loops_to_exit (void)
{
  basic_block unvisited_block = EXIT_BLOCK_PTR;
  struct depth_first_search_dsS dfs_ds;

  /* Perform depth-first search in the reverse graph to find nodes
     reachable from the exit block.  */
  flow_dfs_compute_reverse_init (&dfs_ds);
  flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);

  /* Repeatedly add fake edges, updating the unreachable nodes.  */
  while (1)
    {
      unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds,
                                                          unvisited_block);
      if (!unvisited_block)
        break;

      make_edge (unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE);
      flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block);
    }

  flow_dfs_compute_reverse_finish (&dfs_ds);
  return;
}
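
/* Usage sketch (an editorial illustration, not part of the original
   file; run_dataflow_problem stands in for an arbitrary analysis):
   passes that need every block to reach EXIT bracket their analysis
   with the fake-edge helpers above:

     connect_infinite_loops_to_exit ();
     run_dataflow_problem ();
     remove_fake_exit_edges ();

   add_noreturn_fake_exit_edges () can be used instead when only the
   noreturn case (blocks with no successors) matters; the same
   remove_fake_exit_edges () call undoes either one.  */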

/* Compute reverse top sort order.  This is computing a post order
   numbering of the graph.  If INCLUDE_ENTRY_EXIT is true, then
   ENTRY_BLOCK and EXIT_BLOCK are included.  If DELETE_UNREACHABLE is
   true, unreachable blocks are deleted.  */

int
post_order_compute (int *post_order, bool include_entry_exit,
                    bool delete_unreachable)
{
  edge_iterator *stack;
  int sp;
  int post_order_num = 0;
  sbitmap visited;
  int count;

  if (include_entry_exit)
    post_order[post_order_num++] = EXIT_BLOCK;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else
            post_order[post_order_num++] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            post_order[post_order_num++] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  if (include_entry_exit)
    {
      post_order[post_order_num++] = ENTRY_BLOCK;
      count = post_order_num;
    }
  else
    count = post_order_num + 2;

  /* Delete the unreachable blocks if some were found and we are
     supposed to do it.  */
  if (delete_unreachable && (count != n_basic_blocks))
    {
      basic_block b;
      basic_block next_bb;
      for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
        {
          next_bb = b->next_bb;

          if (!(TEST_BIT (visited, b->index)))
            delete_basic_block (b);
        }

      tidy_fallthru_edges ();
    }

  free (stack);
  sbitmap_free (visited);
  return post_order_num;
}
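
/* Usage sketch for post_order_compute (an editorial illustration, not
   part of the original file; visit_block is hypothetical):

     int i, n;
     int *post = XNEWVEC (int, n_basic_blocks);

     n = post_order_compute (post, false, false);
     for (i = 0; i < n; i++)
       visit_block (BASIC_BLOCK (post[i]));
     free (post);

   n_basic_blocks entries always suffice, whether or not the entry and
   exit blocks are included.  */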


/* Helper routine for inverted_post_order_compute.  BB has to belong to
   a region of the CFG that is unreachable by inverted traversal from
   the exit, i.e. there's no control flow path from ENTRY to EXIT that
   contains this BB.  This can happen in two cases: if there's an
   infinite loop, or if there's a block that has no successor (a call
   to a function that does not return).  Some RTL passes deal with this
   condition by calling connect_infinite_loops_to_exit () and/or
   add_noreturn_fake_exit_edges ().  However, those methods modify the
   CFG itself, which may not be desirable.  Hence, we deal with the
   infinite loop/no return cases by identifying a unique basic block
   that can reach all blocks in such a region by inverted traversal.
   This function returns a basic block that guarantees that all blocks
   in the region are reachable by starting an inverted traversal from
   the returned block.  */

static basic_block
dfs_find_deadend (basic_block bb)
{
  sbitmap visited = sbitmap_alloc (last_basic_block);
  sbitmap_zero (visited);

  for (;;)
    {
      SET_BIT (visited, bb->index);
      if (EDGE_COUNT (bb->succs) == 0
          || TEST_BIT (visited, EDGE_SUCC (bb, 0)->dest->index))
        {
          sbitmap_free (visited);
          return bb;
        }

      bb = EDGE_SUCC (bb, 0)->dest;
    }

  gcc_unreachable ();
}


/* Compute the reverse top sort order of the inverted CFG, i.e. starting
   from the exit block and following the edges backward (from successors
   to predecessors).  This ordering can be used for forward dataflow
   problems among others.

   This function assumes that all blocks in the CFG are reachable from
   the ENTRY (but not necessarily from EXIT).

   If there's an infinite loop, a simple inverted traversal starting from
   the blocks with no successors can't visit all blocks.  To solve this
   problem, we first do an inverted traversal starting from the blocks
   with no successor.  If any blocks are left unvisited by this regular
   inverted traversal from EXIT, those blocks lie in such a problematic
   region.  Among those, we find one block that has a visited predecessor
   (which is an entry into such a region), look for a "dead end" starting
   from that block, and do another inverted traversal from that dead
   end.  */

int
inverted_post_order_compute (int *post_order)
{
  basic_block bb;
  edge_iterator *stack;
  int sp;
  int post_order_num = 0;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Put all blocks that have no successor into the initial work list.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    if (EDGE_COUNT (bb->succs) == 0)
      {
        /* Push the initial edge on to the stack.  */
        if (EDGE_COUNT (bb->preds) > 0)
          {
            stack[sp++] = ei_start (bb->preds);
            SET_BIT (visited, bb->index);
          }
      }

  do
    {
      bool has_unvisited_bb = false;

      /* The inverted traversal loop.  */
      while (sp)
        {
          edge_iterator ei;
          basic_block pred;

          /* Look at the edge on the top of the stack.  */
          ei = stack[sp - 1];
          bb = ei_edge (ei)->dest;
          pred = ei_edge (ei)->src;

          /* Check if the predecessor has been visited yet.  */
          if (! TEST_BIT (visited, pred->index))
            {
              /* Mark that we have visited the predecessor.  */
              SET_BIT (visited, pred->index);

              if (EDGE_COUNT (pred->preds) > 0)
                /* Since the predecessor node has been visited for the first
                   time, check its predecessors.  */
                stack[sp++] = ei_start (pred->preds);
              else
                post_order[post_order_num++] = pred->index;
            }
          else
            {
              if (bb != EXIT_BLOCK_PTR && ei_one_before_end_p (ei))
                post_order[post_order_num++] = bb->index;

              if (!ei_one_before_end_p (ei))
                ei_next (&stack[sp - 1]);
              else
                sp--;
            }
        }

      /* Detect any infinite loop and activate the kludge.
         Note that this doesn't check EXIT_BLOCK itself
         since EXIT_BLOCK is always added after the outer do-while loop.  */
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
        if (!TEST_BIT (visited, bb->index))
          {
            has_unvisited_bb = true;

            if (EDGE_COUNT (bb->preds) > 0)
              {
                edge_iterator ei;
                edge e;
                basic_block visited_pred = NULL;

                /* Find an already visited predecessor.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (TEST_BIT (visited, e->src->index))
                      visited_pred = e->src;
                  }

                if (visited_pred)
                  {
                    basic_block be = dfs_find_deadend (bb);
                    gcc_assert (be != NULL);
                    SET_BIT (visited, be->index);
                    stack[sp++] = ei_start (be->preds);
                    break;
                  }
              }
          }

      if (has_unvisited_bb && sp == 0)
        {
          /* No blocks are reachable from EXIT at all.
             Find a dead-end from the ENTRY, and restart the iteration.  */
          basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR);
          gcc_assert (be != NULL);
          SET_BIT (visited, be->index);
          stack[sp++] = ei_start (be->preds);
        }

      /* The only case in which the while condition below remains true
         is when there's an infinite loop.  */
    }
  while (sp);

  /* EXIT_BLOCK is always included.  */
  post_order[post_order_num++] = EXIT_BLOCK;

  free (stack);
  sbitmap_free (visited);
  return post_order_num;
}

/* Compute the depth first search order and store in the array
   PRE_ORDER if nonzero.  If REV_POST_ORDER is nonzero, return the
   reverse completion number for each node.  Returns the number of nodes
   visited.  A depth first search tries to get as far away from the
   starting point as quickly as possible.

   PRE_ORDER is really a preorder numbering of the graph.
   REV_POST_ORDER is really a reverse postorder numbering of the graph.  */

int
pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
                                bool include_entry_exit)
{
  edge_iterator *stack;
  int sp;
  int pre_order_num = 0;
  int rev_post_order_num = n_basic_blocks - 1;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = ENTRY_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = ENTRY_BLOCK;
    }
  else
    rev_post_order_num -= NUM_FIXED_BLOCKS;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (pre_order)
            pre_order[pre_order_num] = dest->index;

          pre_order_num++;

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else if (rev_post_order)
            /* There are no successors for the DEST node so assign
               its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR
              && rev_post_order)
            /* There are no more successors for the SRC node
               so assign its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (stack);
  sbitmap_free (visited);

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = EXIT_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
      /* The number of nodes visited should be the number of blocks.  */
      gcc_assert (pre_order_num == n_basic_blocks);
    }
  else
    /* The number of nodes visited should be the number of blocks minus
       the entry and exit blocks which are not visited here.  */
    gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);

  return pre_order_num;
}
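
/* Usage sketch for pre_and_rev_post_order_compute (an editorial
   illustration, not part of the original file; visit_block is
   hypothetical).  A forward dataflow pass usually wants reverse
   postorder, which visits a block before its successors except along
   back edges:

     int i, n;
     int *rpo = XNEWVEC (int, n_basic_blocks);

     n = pre_and_rev_post_order_compute (NULL, rpo, false);
     for (i = 0; i < n; i++)
       visit_block (BASIC_BLOCK (rpo[i]));
     free (rpo);  */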

/* Perform a depth first search on the _reverse_ graph, i.e. follow
   edges from a block to its predecessors.

   The computation is split into four pieces:

   flow_dfs_compute_reverse_init () creates the necessary data
   structures.

   flow_dfs_compute_reverse_add_bb () adds a basic block to the data
   structures.  The block will start the search.

   flow_dfs_compute_reverse_execute () continues (or starts) the
   search using the block on the top of the stack, stopping when the
   stack is empty.

   flow_dfs_compute_reverse_finish () destroys the necessary data
   structures.

   Thus, the user will probably call ..._init(), call ..._add_bb() to
   add a beginning basic block to the stack, call ..._execute(),
   possibly add another bb to the stack and again call ..._execute(),
   ..., and finally call _finish().  */

/* Initialize the data structures used for depth-first search on the
   reverse graph.  DATA is the current depth-first search context.  */

static void
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
  /* Allocate stack for back-tracking up CFG.  */
  data->stack = XNEWVEC (basic_block, n_basic_blocks);
  data->sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  data->visited_blocks = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (data->visited_blocks);

  return;
}

/* Add the specified basic block to the top of the dfs data
   structures.  When the search continues, it will start at the
   block.  */

static void
flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb)
{
  data->stack[data->sp++] = bb;
  SET_BIT (data->visited_blocks, bb->index);
}

/* Continue the depth-first search through the reverse graph starting with the
   block at the stack's top and ending when the stack is empty.  Visited nodes
   are marked.  Returns an unvisited basic block, or NULL if there is none
   available.  */

static basic_block
flow_dfs_compute_reverse_execute (depth_first_search_ds data,
                                  basic_block last_unvisited)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  while (data->sp > 0)
    {
      bb = data->stack[--data->sp];

      /* Perform depth-first search on adjacent vertices.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!TEST_BIT (data->visited_blocks, e->src->index))
          flow_dfs_compute_reverse_add_bb (data, e->src);
    }

  /* Determine if there are unvisited basic blocks.  */
  FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb)
    if (!TEST_BIT (data->visited_blocks, bb->index))
      return bb;

  return NULL;
}

/* Destroy the data structures needed for depth-first search on the
   reverse graph.  */

static void
flow_dfs_compute_reverse_finish (depth_first_search_ds data)
{
  free (data->stack);
  sbitmap_free (data->visited_blocks);
}
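
/* Usage sketch for the four routines above (an editorial illustration;
   connect_infinite_loops_to_exit is the in-tree client of this
   protocol):

     struct depth_first_search_dsS dfs_ds;
     basic_block unvisited = EXIT_BLOCK_PTR;

     flow_dfs_compute_reverse_init (&dfs_ds);
     flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);
     while ((unvisited = flow_dfs_compute_reverse_execute (&dfs_ds,
                                                           unvisited)))
       flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited);
     flow_dfs_compute_reverse_finish (&dfs_ds);

   Each block returned by ..._execute () is unreachable in the reverse
   graph from the blocks seeded so far.  */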

/* Performs a DFS from BB over vertices satisfying PREDICATE;
   if REVERSE, go against the direction of the edges.  Returns the number
   of blocks found and their list in RSLT.  RSLT can contain at most
   RSLT_MAX items.  */
int
dfs_enumerate_from (basic_block bb, int reverse,
                    bool (*predicate) (const_basic_block, const void *),
                    basic_block *rslt, int rslt_max, const void *data)
{
  basic_block *st, lbb;
  int sp = 0, tv = 0;
  unsigned size;

  /* A bitmap to keep track of visited blocks.  Allocating it afresh each
     time this function is called is too expensive, since dfs_enumerate_from
     is often used on small (almost) disjoint parts of the cfg (bodies of
     loops), and allocating a large sbitmap on every call would lead to
     quadratic behavior.  */
  static sbitmap visited;
  static unsigned v_size;

#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))

  /* Resize the VISITED sbitmap if necessary.  */
  size = last_basic_block;
  if (size < 10)
    size = 10;

  if (!visited)
    {
      visited = sbitmap_alloc (size);
      sbitmap_zero (visited);
      v_size = size;
    }
  else if (v_size < size)
    {
      /* Ensure that we increase the size of the sbitmap exponentially.  */
      if (2 * v_size > size)
        size = 2 * v_size;

      visited = sbitmap_resize (visited, size, 0);
      v_size = size;
    }

  st = XCNEWVEC (basic_block, rslt_max);
  rslt[tv++] = st[sp++] = bb;
  MARK_VISITED (bb);
  while (sp)
    {
      edge e;
      edge_iterator ei;
      lbb = st[--sp];
      if (reverse)
        {
          FOR_EACH_EDGE (e, ei, lbb->preds)
            if (!VISITED_P (e->src) && predicate (e->src, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->src;
                MARK_VISITED (e->src);
              }
        }
      else
        {
          FOR_EACH_EDGE (e, ei, lbb->succs)
            if (!VISITED_P (e->dest) && predicate (e->dest, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->dest;
                MARK_VISITED (e->dest);
              }
        }
    }
  free (st);
  for (sp = 0; sp < tv; sp++)
    UNMARK_VISITED (rslt[sp]);
  return tv;
#undef MARK_VISITED
#undef UNMARK_VISITED
#undef VISITED_P
}
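
/* Usage sketch for dfs_enumerate_from (an editorial illustration, not
   part of the original file; bb_in_region_p and its loop_father test
   are hypothetical):

     static bool
     bb_in_region_p (const_basic_block bb, const void *data)
     {
       return (const void *) bb->loop_father == data;
     }

     ...
     basic_block *body = XNEWVEC (basic_block, n_basic_blocks);
     int n = dfs_enumerate_from (header, 0, bb_in_region_p,
                                 body, n_basic_blocks, loop);
     free (body);

   With REVERSE nonzero the walk follows predecessor edges instead of
   successor edges.  */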


/* Compute dominance frontiers, ala Harvey, Ferrante, et al.

   This algorithm can be found in Timothy Harvey's PhD thesis, at
   http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
   dominance algorithms.

   First, we identify each join point, j (any node with more than one
   incoming edge is a join point).

   We then examine each predecessor, p, of j and walk up the dominator tree
   starting at p.

   We stop the walk when we reach j's immediate dominator - j is in the
   dominance frontier of each of the nodes in the walk, except for j's
   immediate dominator.  Intuitively, all of the rest of j's dominators are
   shared by j's predecessors as well.
   Since they dominate j, they will not have j in their dominance frontiers.

   The number of nodes touched by this algorithm is equal to the size
   of the dominance frontiers, no more, no less.  */


static void
compute_dominance_frontiers_1 (bitmap_head *frontiers)
{
  edge p;
  edge_iterator ei;
  basic_block b;
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) >= 2)
        {
          FOR_EACH_EDGE (p, ei, b->preds)
            {
              basic_block runner = p->src;
              basic_block domsb;
              if (runner == ENTRY_BLOCK_PTR)
                continue;

              domsb = get_immediate_dominator (CDI_DOMINATORS, b);
              while (runner != domsb)
                {
                  if (!bitmap_set_bit (&frontiers[runner->index],
                                       b->index))
                    break;
                  runner = get_immediate_dominator (CDI_DOMINATORS,
                                                    runner);
                }
            }
        }
    }
}


void
compute_dominance_frontiers (bitmap_head *frontiers)
{
  timevar_push (TV_DOM_FRONTIERS);

  compute_dominance_frontiers_1 (frontiers);

  timevar_pop (TV_DOM_FRONTIERS);
}

/* Given a set of blocks with variable definitions (DEF_BLOCKS),
   return a bitmap with all the blocks in the iterated dominance
   frontier of the blocks in DEF_BLOCKS.  DFS contains dominance
   frontier information as returned by compute_dominance_frontiers.

   The resulting set of blocks are the potential sites where PHI nodes
   are needed.  The caller is responsible for freeing the memory
   allocated for the return value.  */

bitmap
compute_idf (bitmap def_blocks, bitmap_head *dfs)
{
  bitmap_iterator bi;
  unsigned bb_index, i;
  VEC(int,heap) *work_stack;
  bitmap phi_insertion_points;

  work_stack = VEC_alloc (int, heap, n_basic_blocks);
  phi_insertion_points = BITMAP_ALLOC (NULL);

  /* Seed the work list with all the blocks in DEF_BLOCKS.  We use
     VEC_quick_push here for speed.  This is safe because we know that
     the number of definition blocks is no greater than the number of
     basic blocks, which is the initial capacity of WORK_STACK.  */
  EXECUTE_IF_SET_IN_BITMAP (def_blocks, 0, bb_index, bi)
    VEC_quick_push (int, work_stack, bb_index);

  /* Pop a block off the worklist, and add to the worklist every block
     in that block's dominance frontier that we have not already
     processed.  Iterate until the worklist is empty.  Blocks which are
     added to the worklist are potential sites for PHI nodes.  */
  while (VEC_length (int, work_stack) > 0)
    {
      bb_index = VEC_pop (int, work_stack);

      /* Since the registration of NEW -> OLD name mappings is done
         separately from the call to update_ssa, when updating the SSA
         form, the basic blocks where new and/or old names are defined
         may have disappeared by CFG cleanup calls.  In this case,
         we may pull a non-existing block from the work stack.  */
      gcc_assert (bb_index < (unsigned) last_basic_block);

      EXECUTE_IF_AND_COMPL_IN_BITMAP (&dfs[bb_index], phi_insertion_points,
                                      0, i, bi)
        {
          /* Use a safe push because if there is a definition of VAR
             in every basic block, then WORK_STACK may eventually have
             more than N_BASIC_BLOCK entries.  */
          VEC_safe_push (int, heap, work_stack, i);
          bitmap_set_bit (phi_insertion_points, i);
        }
    }

  VEC_free (int, heap, work_stack);

  return phi_insertion_points;
}
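
/* Usage sketch for the dominance-frontier/IDF pipeline (an editorial
   illustration, not part of the original file; DEF_BLOCKS is assumed to
   hold the indices of the blocks defining the variable of interest):

     basic_block bb;
     bitmap phi_sites;
     bitmap_head *dfs = XNEWVEC (bitmap_head, last_basic_block);

     FOR_EACH_BB (bb)
       bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);

     calc_dominance_info (CDI_DOMINATORS);
     compute_dominance_frontiers (dfs);
     phi_sites = compute_idf (def_blocks, dfs);
     ...
     BITMAP_FREE (phi_sites);  */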