/* Control flow graph analysis code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains various simple utilities to analyze the CFG.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "toplev.h"
#include "tm_p.h"
#include "vec.h"
#include "vecprim.h"
#include "timevar.h"

/* Store the data structures necessary for depth-first search.  */
struct depth_first_search_dsS {
  /* stack for backtracking during the algorithm */
  basic_block *stack;

  /* number of basic blocks in the stack.  That is, positions 0, ...,
     sp-1 have blocks.  */
  unsigned int sp;

  /* record of basic blocks already seen by depth-first search */
  sbitmap visited_blocks;
};
typedef struct depth_first_search_dsS *depth_first_search_ds;

static void flow_dfs_compute_reverse_init (depth_first_search_ds);
static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds,
                                             basic_block);
static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds,
                                                     basic_block);
static void flow_dfs_compute_reverse_finish (depth_first_search_ds);
static bool flow_active_insn_p (const_rtx);

/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const_rtx insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
forwarder_block_p (const_basic_block bb)
{
  rtx insn;

  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Return true if we can reach target from src by falling through.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx insn = BB_END (src);
  rtx insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  if (src->next_bb != target)
    return false;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (insn2 && !active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  /* ??? Later we may add code to move jump tables offline.  */
  return next_active_insn (insn) == insn2;
}

/* Return true if we could reach target from src by falling through,
   if the target was made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Mark the back edges in DFS traversal.
   Return nonzero if a loop (natural or otherwise) is present.
   Inspired by Depth_First_Search_PP described in:

     Advanced Compiler Design and Implementation
     Steven Muchnick
     Morgan Kaufmann, 1997

   and heavily borrowed from pre_and_rev_post_order_compute.  */

bool
mark_dfs_back_edges (void)
{
  edge_iterator *stack;
  int *pre;
  int *post;
  int sp;
  int prenum = 1;
  int postnum = 1;
  sbitmap visited;
  bool found = false;

  /* Allocate the preorder and postorder number arrays.  */
  pre = XCNEWVEC (int, last_basic_block);
  post = XCNEWVEC (int, last_basic_block);

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;
      ei_edge (ei)->flags &= ~EDGE_DFS_BACK;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          pre[dest->index] = prenum++;
          if (EDGE_COUNT (dest->succs) > 0)
            {
              /* Since the DEST node has been visited for the first
                 time, check its successors.  */
              stack[sp++] = ei_start (dest->succs);
            }
          else
            post[dest->index] = postnum++;
        }
      else
        {
          if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
              && pre[src->index] >= pre[dest->index]
              && post[dest->index] == 0)
            ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;

          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            post[src->index] = postnum++;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (pre);
  free (post);
  free (stack);
  sbitmap_free (visited);

  return found;
}

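/* Usage sketch (illustrative, not part of the original file): a pass
   that wants to treat loop back edges specially would typically do

     bool cfg_has_loop = mark_dfs_back_edges ();
     ...
     FOR_EACH_EDGE (e, ei, bb->succs)
       if (e->flags & EDGE_DFS_BACK)
         ... handle the back edge ...

   where E, EI and BB are the usual edge/iterator/block locals.  */
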
/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru.  */

void
set_edge_can_fallthru_flag (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          e->flags &= ~EDGE_CAN_FALLTHRU;

          /* The FALLTHRU edge is also a CAN_FALLTHRU edge.  */
          if (e->flags & EDGE_FALLTHRU)
            e->flags |= EDGE_CAN_FALLTHRU;
        }

      /* If the BB ends with an invertible condjump all (2) edges are
         CAN_FALLTHRU edges.  */
      if (EDGE_COUNT (bb->succs) != 2)
        continue;
      if (!any_condjump_p (BB_END (bb)))
        continue;
      if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0))
        continue;
      /* Restore the original direction of the jump.  */
      invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0);
      EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
      EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
    }
}

/* Find unreachable blocks.  An unreachable block will have 0 in
   the reachable bit in block->flags.  A nonzero value indicates the
   block is reachable.  */

void
find_unreachable_blocks (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  tos = worklist = XNEWVEC (basic_block, n_basic_blocks);

  /* Clear all the reachability flags.  */

  FOR_EACH_BB (bb)
    bb->flags &= ~BB_REACHABLE;

  /* Add our starting points to the worklist.  Almost always there will
     be only one.  It isn't inconceivable that we might one day directly
     support Fortran alternate entry points.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      *tos++ = e->dest;

      /* Mark the block reachable.  */
      e->dest->flags |= BB_REACHABLE;
    }

  /* Iterate: find everything reachable from what we've already seen.  */

  while (tos != worklist)
    {
      basic_block b = *--tos;

      FOR_EACH_EDGE (e, ei, b->succs)
        {
          basic_block dest = e->dest;

          if (!(dest->flags & BB_REACHABLE))
            {
              *tos++ = dest;
              dest->flags |= BB_REACHABLE;
            }
        }
    }

  free (worklist);
}

/* Functions to access an edge list with a vector representation.
   Enough data is kept such that given an index number, the
   pred and succ that edge represents can be determined, or
   given a pred and a succ, its index number can be returned.
   This allows algorithms which consume a lot of memory to
   represent the normally full matrix of edge (pred,succ) with a
   single indexed vector, edge (EDGE_INDEX (pred, succ)), with no
   wasted space in the client code due to sparse flow graphs.  */

/* This function initializes the edge list.  Basically the entire
   flowgraph is processed, and all edges are assigned a number,
   and the data structure is filled in.  */

struct edge_list *
create_edge_list (void)
{
  struct edge_list *elist;
  edge e;
  int num_edges;
  int block_count;
  basic_block bb;
  edge_iterator ei;

  block_count = n_basic_blocks; /* Include the entry and exit blocks.  */

  num_edges = 0;

  /* Determine the number of edges in the flow graph by counting successor
     edges on each basic block.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      num_edges += EDGE_COUNT (bb->succs);
    }

  elist = XNEW (struct edge_list);
  elist->num_blocks = block_count;
  elist->num_edges = num_edges;
  elist->index_to_edge = XNEWVEC (edge, num_edges);

  num_edges = 0;

  /* Follow successors of blocks, and register these edges.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      elist->index_to_edge[num_edges++] = e;

  return elist;
}

/* This function frees memory associated with an edge list.  */

void
free_edge_list (struct edge_list *elist)
{
  if (elist)
    {
      free (elist->index_to_edge);
      free (elist);
    }
}

/* This function provides debug output showing an edge list.  */

void
print_edge_list (FILE *f, struct edge_list *elist)
{
  int x;

  fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
           elist->num_blocks, elist->num_edges);

  for (x = 0; x < elist->num_edges; x++)
    {
      fprintf (f, " %-4d - edge(", x);
      if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
        fprintf (f, "entry,");
      else
        fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);

      if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
        fprintf (f, "exit)\n");
      else
        fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
    }
}

/* This function provides an internal consistency check of an edge list,
   verifying that all edges are present, and that there are no
   extra edges.  */

void
verify_edge_list (FILE *f, struct edge_list *elist)
{
  int pred, succ, index;
  edge e;
  basic_block bb, p, s;
  edge_iterator ei;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          pred = e->src->index;
          succ = e->dest->index;
          index = EDGE_INDEX (elist, e->src, e->dest);
          if (index == EDGE_INDEX_NO_EDGE)
            {
              fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
              continue;
            }

          if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
            fprintf (f, "*p* Pred for index %d should be %d not %d\n",
                     index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
          if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
            fprintf (f, "*p* Succ for index %d should be %d not %d\n",
                     index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
        }
    }

  /* We've verified that all the edges are in the list, now let's make sure
     there are no spurious edges in the list.  */

  FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
      {
        int found_edge = 0;

        FOR_EACH_EDGE (e, ei, p->succs)
          if (e->dest == s)
            {
              found_edge = 1;
              break;
            }

        FOR_EACH_EDGE (e, ei, s->preds)
          if (e->src == p)
            {
              found_edge = 1;
              break;
            }

        if (EDGE_INDEX (elist, p, s)
            == EDGE_INDEX_NO_EDGE && found_edge != 0)
          fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
                   p->index, s->index);
        if (EDGE_INDEX (elist, p, s)
            != EDGE_INDEX_NO_EDGE && found_edge == 0)
          fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
                   p->index, s->index, EDGE_INDEX (elist, p, s));
      }
}

/* Given PRED and SUCC blocks, return the edge which connects the blocks.
   If no such edge exists, return NULL.  */

edge
find_edge (basic_block pred, basic_block succ)
{
  edge e;
  edge_iterator ei;

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      FOR_EACH_EDGE (e, ei, pred->succs)
        if (e->dest == succ)
          return e;
    }
  else
    {
      FOR_EACH_EDGE (e, ei, succ->preds)
        if (e->src == pred)
          return e;
    }

  return NULL;
}

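/* Usage sketch (illustrative, not part of the original file): since
   find_edge returns NULL when the blocks are not directly connected,
   it doubles as a connectivity test:

     edge e = find_edge (bb, EXIT_BLOCK_PTR);
     if (e && (e->flags & EDGE_FALLTHRU))
       ... BB falls through to the exit block ...  */
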
/* This routine will determine what, if any, edge there is between
   a specified predecessor and successor.  Return the edge's index in
   EDGE_LIST, or EDGE_INDEX_NO_EDGE if no such edge exists.  */

int
find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ)
{
  int x;

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    if (INDEX_EDGE_PRED_BB (edge_list, x) == pred
        && INDEX_EDGE_SUCC_BB (edge_list, x) == succ)
      return x;

  return (EDGE_INDEX_NO_EDGE);
}

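/* Usage sketch (illustrative, not part of the original file): the
   edge list machinery above is normally used as

     struct edge_list *elist = create_edge_list ();
     int index = EDGE_INDEX (elist, bb1, bb2);
     if (index != EDGE_INDEX_NO_EDGE)
       ... use INDEX to address a per-edge vector ...
     free_edge_list (elist);

   where BB1 and BB2 stand for any pred/succ pair of interest.  */
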
/* Dump the list of basic blocks in the bitmap NODES.  */

void
flow_nodes_print (const char *str, const_sbitmap nodes, FILE *file)
{
  unsigned int node = 0;
  sbitmap_iterator sbi;

  if (! nodes)
    return;

  fprintf (file, "%s { ", str);
  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, sbi)
    fprintf (file, "%d ", node);
  fputs ("}\n", file);
}

/* Dump the list of edges in the array EDGE_LIST.  */

void
flow_edge_list_print (const char *str, const edge *edge_list, int num_edges, FILE *file)
{
  int i;

  if (! edge_list)
    return;

  fprintf (file, "%s { ", str);
  for (i = 0; i < num_edges; i++)
    fprintf (file, "%d->%d ", edge_list[i]->src->index,
             edge_list[i]->dest->index);

  fputs ("}\n", file);
}


/* This routine will remove any fake predecessor edges for a basic block.
   When the edge is removed, it is also removed from whatever successor
   list it is in.  */

static void
remove_fake_predecessors (basic_block bb)
{
  edge e;
  edge_iterator ei;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      if ((e->flags & EDGE_FAKE) == EDGE_FAKE)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* This routine will remove all fake edges from the flow graph.  If
   we remove all fake successors, it will automatically remove all
   fake predecessors.  */

void
remove_fake_edges (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
    remove_fake_predecessors (bb);
}

/* This routine will remove all fake edges to the EXIT_BLOCK.  */

void
remove_fake_exit_edges (void)
{
  remove_fake_predecessors (EXIT_BLOCK_PTR);
}


/* This function will add a fake edge between any block which has no
   successors, and the exit block.  Some data flow equations require these
   edges to exist.  */

void
add_noreturn_fake_exit_edges (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    if (EDGE_COUNT (bb->succs) == 0)
      make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}

/* This function adds a fake edge from each infinite loop to the
   exit block.  Some optimizations require a path from each node to
   the exit node.

   See also Morgan, Figure 3.10, pp. 82-83.

   The current implementation is ugly, not attempting to minimize the
   number of inserted fake edges.  To reduce the number of fake edges
   to insert, add fake edges from _innermost_ loops containing only
   nodes not reachable from the exit block.  */

void
connect_infinite_loops_to_exit (void)
{
  basic_block unvisited_block = EXIT_BLOCK_PTR;
  struct depth_first_search_dsS dfs_ds;

  /* Perform depth-first search in the reverse graph to find nodes
     reachable from the exit block.  */
  flow_dfs_compute_reverse_init (&dfs_ds);
  flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);

  /* Repeatedly add fake edges, updating the unreachable nodes.  */
  while (1)
    {
      unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds,
                                                          unvisited_block);
      if (!unvisited_block)
        break;

      make_edge (unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE);
      flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block);
    }

  flow_dfs_compute_reverse_finish (&dfs_ds);
  return;
}

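/* Usage sketch (illustrative, not part of the original file): passes
   whose dataflow equations need every block to reach EXIT typically
   bracket their work with the fake-edge helpers above:

     add_noreturn_fake_exit_edges ();
     connect_infinite_loops_to_exit ();
     ... solve the dataflow problem ...
     remove_fake_exit_edges ();  */
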
/* Compute reverse top sort order.  This is computing a post order
   numbering of the graph.  If INCLUDE_ENTRY_EXIT is true, then
   ENTRY_BLOCK and EXIT_BLOCK are included.  If DELETE_UNREACHABLE is
   true, unreachable blocks are deleted.  */

int
post_order_compute (int *post_order, bool include_entry_exit,
                    bool delete_unreachable)
{
  edge_iterator *stack;
  int sp;
  int post_order_num = 0;
  sbitmap visited;
  int count;

  if (include_entry_exit)
    post_order[post_order_num++] = EXIT_BLOCK;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else
            post_order[post_order_num++] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            post_order[post_order_num++] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  if (include_entry_exit)
    {
      post_order[post_order_num++] = ENTRY_BLOCK;
      count = post_order_num;
    }
  else
    count = post_order_num + 2;

  /* Delete the unreachable blocks if some were found and we are
     supposed to do it.  */
  if (delete_unreachable && (count != n_basic_blocks))
    {
      basic_block b;
      basic_block next_bb;
      for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
        {
          next_bb = b->next_bb;

          if (!(TEST_BIT (visited, b->index)))
            delete_basic_block (b);
        }

      tidy_fallthru_edges ();
    }

  free (stack);
  sbitmap_free (visited);
  return post_order_num;
}


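/* Usage sketch (illustrative, not part of the original file): the
   caller owns the order array, which must be large enough for every
   block that can be numbered:

     int *post = XNEWVEC (int, n_basic_blocks);
     int n = post_order_compute (post, false, false);
     ... post[0 .. n-1] now holds block indices in post order ...
     free (post);  */
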
/* Helper routine for inverted_post_order_compute.
   BB has to belong to a region of the CFG that is unreachable by
   inverted traversal from the exit, i.e. there is no control flow
   path from ENTRY to EXIT that contains this BB.
   This can happen in two cases - if there's an infinite loop
   or if there's a block that has no successor
   (call to a function with no return).
   Some RTL passes deal with this condition by
   calling connect_infinite_loops_to_exit () and/or
   add_noreturn_fake_exit_edges ().
   However, those methods involve modifying the CFG itself
   which may not be desirable.
   Hence, we deal with the infinite loop/no return cases
   by identifying a unique basic block that can reach all blocks
   in such a region by inverted traversal.
   This function returns a basic block that guarantees
   that all blocks in the region are reachable
   by starting an inverted traversal from the returned block.  */

static basic_block
dfs_find_deadend (basic_block bb)
{
  sbitmap visited = sbitmap_alloc (last_basic_block);
  sbitmap_zero (visited);

  for (;;)
    {
      SET_BIT (visited, bb->index);
      if (EDGE_COUNT (bb->succs) == 0
          || TEST_BIT (visited, EDGE_SUCC (bb, 0)->dest->index))
        {
          sbitmap_free (visited);
          return bb;
        }

      bb = EDGE_SUCC (bb, 0)->dest;
    }

  gcc_unreachable ();
}


/* Compute the reverse top sort order of the inverted CFG,
   i.e. starting from the exit block and following the edges backward
   (from successors to predecessors).
   This ordering can be used for forward dataflow problems among others.

   This function assumes that all blocks in the CFG are reachable
   from the ENTRY (but not necessarily from EXIT).

   If there's an infinite loop,
   a simple inverted traversal starting from the blocks
   with no successors can't visit all blocks.
   To solve this problem, we first do an inverted traversal
   starting from the blocks with no successor.
   Any block left unvisited by that regular
   inverted traversal lies in such a problematic region.
   Among those, we find one block that has
   a visited predecessor (which is an entry into such a region),
   look for a "dead end" starting from that block,
   and do another inverted traversal from that dead end.  */

int
inverted_post_order_compute (int *post_order)
{
  basic_block bb;
  edge_iterator *stack;
  int sp;
  int post_order_num = 0;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Put all blocks that have no successor into the initial work list.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    if (EDGE_COUNT (bb->succs) == 0)
      {
        /* Push the initial edge on to the stack.  */
        if (EDGE_COUNT (bb->preds) > 0)
          {
            stack[sp++] = ei_start (bb->preds);
            SET_BIT (visited, bb->index);
          }
      }

  do
    {
      bool has_unvisited_bb = false;

      /* The inverted traversal loop.  */
      while (sp)
        {
          edge_iterator ei;
          basic_block pred;

          /* Look at the edge on the top of the stack.  */
          ei = stack[sp - 1];
          bb = ei_edge (ei)->dest;
          pred = ei_edge (ei)->src;

          /* Check if the predecessor has been visited yet.  */
          if (! TEST_BIT (visited, pred->index))
            {
              /* Mark that we have visited the predecessor.  */
              SET_BIT (visited, pred->index);

              if (EDGE_COUNT (pred->preds) > 0)
                /* Since the predecessor node has been visited for the first
                   time, check its predecessors.  */
                stack[sp++] = ei_start (pred->preds);
              else
                post_order[post_order_num++] = pred->index;
            }
          else
            {
              if (bb != EXIT_BLOCK_PTR && ei_one_before_end_p (ei))
                post_order[post_order_num++] = bb->index;

              if (!ei_one_before_end_p (ei))
                ei_next (&stack[sp - 1]);
              else
                sp--;
            }
        }

      /* Detect any infinite loop and activate the kludge.
         Note that this doesn't check EXIT_BLOCK itself
         since EXIT_BLOCK is always added after the outer do-while loop.  */
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
        if (!TEST_BIT (visited, bb->index))
          {
            has_unvisited_bb = true;

            if (EDGE_COUNT (bb->preds) > 0)
              {
                edge_iterator ei;
                edge e;
                basic_block visited_pred = NULL;

                /* Find an already visited predecessor.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (TEST_BIT (visited, e->src->index))
                      visited_pred = e->src;
                  }

                if (visited_pred)
                  {
                    basic_block be = dfs_find_deadend (bb);
                    gcc_assert (be != NULL);
                    SET_BIT (visited, be->index);
                    stack[sp++] = ei_start (be->preds);
                    break;
                  }
              }
          }

      if (has_unvisited_bb && sp == 0)
        {
          /* No blocks are reachable from EXIT at all.
             Find a dead-end from the ENTRY, and restart the iteration.  */
          basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR);
          gcc_assert (be != NULL);
          SET_BIT (visited, be->index);
          stack[sp++] = ei_start (be->preds);
        }

      /* The only case in which the do-while condition below holds,
         making the outer loop iterate again, is when there's an
         infinite loop.  */
    }
  while (sp);

  /* EXIT_BLOCK is always included.  */
  post_order[post_order_num++] = EXIT_BLOCK;

  free (stack);
  sbitmap_free (visited);
  return post_order_num;
}

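/* Usage sketch (illustrative, not part of the original file):
   unlike post_order_compute, this numbers every block reachable from
   ENTRY plus EXIT, so the array must have room for all of them:

     int *inv = XNEWVEC (int, n_basic_blocks);
     int n = inverted_post_order_compute (inv);
     ... iterate over inv[0 .. n-1], e.g. to seed a forward dataflow
         problem with a favorable visit order ...
     free (inv);  */
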
/* Compute the depth first search order and store in the array
  PRE_ORDER if nonzero.  If REV_POST_ORDER is nonzero, store the
  reverse completion number for each node there.  Returns the number
  of nodes visited.  A depth first search tries to get as far away
  from the starting point as quickly as possible.

  pre_order is really a preorder numbering of the graph.
  rev_post_order is really a reverse postorder numbering of the graph.
 */

int
pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
                                bool include_entry_exit)
{
  edge_iterator *stack;
  int sp;
  int pre_order_num = 0;
  int rev_post_order_num = n_basic_blocks - 1;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
  sp = 0;

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = ENTRY_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = ENTRY_BLOCK;
    }
  else
    rev_post_order_num -= NUM_FIXED_BLOCKS;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (pre_order)
            pre_order[pre_order_num] = dest->index;

          pre_order_num++;

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else if (rev_post_order)
            /* There are no successors for the DEST node so assign
               its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR
              && rev_post_order)
            /* There are no more successors for the SRC node
               so assign its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (stack);
  sbitmap_free (visited);

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = EXIT_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
      /* The number of nodes visited should be the number of blocks.  */
      gcc_assert (pre_order_num == n_basic_blocks);
    }
  else
    /* The number of nodes visited should be the number of blocks minus
       the entry and exit blocks which are not visited here.  */
    gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);

  return pre_order_num;
}

/* Compute the depth first search order on the _reverse_ graph and
   store in the array DFS_ORDER, marking the nodes visited in VISITED.
   Returns the number of nodes visited.

   The computation is split into three pieces:

   flow_dfs_compute_reverse_init () creates the necessary data
   structures.

   flow_dfs_compute_reverse_add_bb () adds a basic block to the data
   structures.  The block will start the search.

   flow_dfs_compute_reverse_execute () continues (or starts) the
   search using the block on the top of the stack, stopping when the
   stack is empty.

   flow_dfs_compute_reverse_finish () destroys the necessary data
   structures.

   Thus, the user will probably call ..._init(), call ..._add_bb() to
   add a beginning basic block to the stack, call ..._execute(),
   possibly add another bb to the stack and again call ..._execute(),
   ..., and finally call _finish().  */

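/* Concrete call sequence (sketch; connect_infinite_loops_to_exit
   above is the in-file user of this interface):

     struct depth_first_search_dsS dfs_ds;
     flow_dfs_compute_reverse_init (&dfs_ds);
     flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);
     bb = flow_dfs_compute_reverse_execute (&dfs_ds, EXIT_BLOCK_PTR);
     ... possibly add more blocks and re-execute ...
     flow_dfs_compute_reverse_finish (&dfs_ds);  */
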
/* Initialize the data structures used for depth-first search on the
   reverse graph.  DATA is the current depth-first search context.  */

static void
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
  /* Allocate stack for back-tracking up CFG.  */
  data->stack = XNEWVEC (basic_block, n_basic_blocks);
  data->sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  data->visited_blocks = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (data->visited_blocks);

  return;
}

/* Add the specified basic block to the top of the dfs data
   structures.  When the search continues, it will start at the
   block.  */

static void
flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb)
{
  data->stack[data->sp++] = bb;
  SET_BIT (data->visited_blocks, bb->index);
}

/* Continue the depth-first search through the reverse graph starting with the
   block at the stack's top and ending when the stack is empty.  Visited nodes
   are marked.  Returns an unvisited basic block, or NULL if there is none
   available.  */

static basic_block
flow_dfs_compute_reverse_execute (depth_first_search_ds data,
                                  basic_block last_unvisited)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  while (data->sp > 0)
    {
      bb = data->stack[--data->sp];

      /* Perform depth-first search on adjacent vertices.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!TEST_BIT (data->visited_blocks, e->src->index))
          flow_dfs_compute_reverse_add_bb (data, e->src);
    }

  /* Determine if there are unvisited basic blocks.  */
  FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb)
    if (!TEST_BIT (data->visited_blocks, bb->index))
      return bb;

  return NULL;
}

/* Destroy the data structures needed for depth-first search on the
   reverse graph.  */

static void
flow_dfs_compute_reverse_finish (depth_first_search_ds data)
{
  free (data->stack);
  sbitmap_free (data->visited_blocks);
}

/* Performs dfs search from BB over vertices satisfying PREDICATE;
   if REVERSE, go against direction of edges.  Returns number of blocks
   found and their list in RSLT.  RSLT can contain at most RSLT_MAX items.  */
int
dfs_enumerate_from (basic_block bb, int reverse,
                    bool (*predicate) (const_basic_block, const void *),
                    basic_block *rslt, int rslt_max, const void *data)
{
  basic_block *st, lbb;
  int sp = 0, tv = 0;
  unsigned size;

  /* A bitmap to keep track of visited blocks.  Allocating it each time
     this function is called is not possible, since dfs_enumerate_from
     is often used on small (almost) disjoint parts of cfg (bodies of
     loops), and allocating a large sbitmap would lead to quadratic
     behavior.  */
  static sbitmap visited;
  static unsigned v_size;

#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))

  /* Resize the VISITED sbitmap if necessary.  */
  size = last_basic_block;
  if (size < 10)
    size = 10;

  if (!visited)
    {
      visited = sbitmap_alloc (size);
      sbitmap_zero (visited);
      v_size = size;
    }
  else if (v_size < size)
    {
      /* Ensure that we increase the size of the sbitmap exponentially.  */
      if (2 * v_size > size)
        size = 2 * v_size;

      visited = sbitmap_resize (visited, size, 0);
      v_size = size;
    }

  st = XCNEWVEC (basic_block, rslt_max);
  rslt[tv++] = st[sp++] = bb;
  MARK_VISITED (bb);
  while (sp)
    {
      edge e;
      edge_iterator ei;
      lbb = st[--sp];
      if (reverse)
        {
          FOR_EACH_EDGE (e, ei, lbb->preds)
            if (!VISITED_P (e->src) && predicate (e->src, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->src;
                MARK_VISITED (e->src);
              }
        }
      else
        {
          FOR_EACH_EDGE (e, ei, lbb->succs)
            if (!VISITED_P (e->dest) && predicate (e->dest, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->dest;
                MARK_VISITED (e->dest);
              }
        }
    }
  free (st);
  for (sp = 0; sp < tv; sp++)
    UNMARK_VISITED (rslt[sp]);
  return tv;
#undef MARK_VISITED
#undef UNMARK_VISITED
#undef VISITED_P
}


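/* Usage sketch (illustrative, not part of the original file): to
   collect up to MAX_BBS blocks reachable from BB while staying inside
   a region accepted by a caller-supplied predicate:

     static bool
     in_region_p (const_basic_block bb, const void *data)
     {
       ... test BB against the region described by DATA ...
     }

     basic_block blocks[MAX_BBS];
     int n = dfs_enumerate_from (bb, 0, in_region_p, blocks,
                                 MAX_BBS, region);

   MAX_BBS, IN_REGION_P and REGION are hypothetical names; passing
   reverse = 1 instead walks predecessor edges.  */
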
/* Compute dominance frontiers, ala Harvey, Ferrante, et al.

   This algorithm can be found in Timothy Harvey's PhD thesis, at
   http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
   dominance algorithms.

   First, we identify each join point, j (any node with more than one
   incoming edge is a join point).

   We then examine each predecessor, p, of j and walk up the dominator tree
   starting at p.

   We stop the walk when we reach j's immediate dominator - j is in the
   dominance frontier of each of the nodes in the walk, except for j's
   immediate dominator.  Intuitively, all of the rest of j's dominators are
   shared by j's predecessors as well.
   Since they dominate j, they will not have j in their dominance frontiers.

   The number of nodes touched by this algorithm is equal to the size
   of the dominance frontiers, no more, no less.  */


static void
compute_dominance_frontiers_1 (bitmap *frontiers)
{
  edge p;
  edge_iterator ei;
  basic_block b;
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) >= 2)
        {
          FOR_EACH_EDGE (p, ei, b->preds)
            {
              basic_block runner = p->src;
              basic_block domsb;
              if (runner == ENTRY_BLOCK_PTR)
                continue;

              domsb = get_immediate_dominator (CDI_DOMINATORS, b);
              while (runner != domsb)
                {
                  if (bitmap_bit_p (frontiers[runner->index], b->index))
                    break;
                  bitmap_set_bit (frontiers[runner->index],
                                  b->index);
                  runner = get_immediate_dominator (CDI_DOMINATORS,
                                                    runner);
                }
            }
        }
    }
}


/* Compute the dominance frontier of each basic block, storing the
   frontier of block I in FRONTIERS[I].  */

void
compute_dominance_frontiers (bitmap *frontiers)
{
  timevar_push (TV_DOM_FRONTIERS);

  compute_dominance_frontiers_1 (frontiers);

  timevar_pop (TV_DOM_FRONTIERS);
}

/* Given a set of blocks with variable definitions (DEF_BLOCKS),
   return a bitmap with all the blocks in the iterated dominance
   frontier of the blocks in DEF_BLOCKS.  DFS contains dominance
   frontier information as returned by compute_dominance_frontiers.

   The resulting set of blocks are the potential sites where PHI nodes
   are needed.  The caller is responsible for freeing the memory
   allocated for the return value.  */

bitmap
compute_idf (bitmap def_blocks, bitmap *dfs)
{
  bitmap_iterator bi;
  unsigned bb_index, i;
  VEC(int,heap) *work_stack;
  bitmap phi_insertion_points;

  work_stack = VEC_alloc (int, heap, n_basic_blocks);
  phi_insertion_points = BITMAP_ALLOC (NULL);

  /* Seed the work list with all the blocks in DEF_BLOCKS.  We use
     VEC_quick_push here for speed.  This is safe because we know that
     the number of definition blocks is no greater than the number of
     basic blocks, which is the initial capacity of WORK_STACK.  */
  EXECUTE_IF_SET_IN_BITMAP (def_blocks, 0, bb_index, bi)
    VEC_quick_push (int, work_stack, bb_index);

  /* Pop a block off the worklist, add every block that appears in
     the original block's DF that we have not already processed to
     the worklist.  Iterate until the worklist is empty.  Blocks
     which are added to the worklist are potential sites for
     PHI nodes.  */
  while (VEC_length (int, work_stack) > 0)
    {
      bb_index = VEC_pop (int, work_stack);

      /* Since the registration of NEW -> OLD name mappings is done
         separately from the call to update_ssa, when updating the SSA
         form, the basic blocks where new and/or old names are defined
         may have disappeared by CFG cleanup calls.  In this case,
         we may pull a non-existing block from the work stack.  */
      gcc_assert (bb_index < (unsigned) last_basic_block);

      EXECUTE_IF_AND_COMPL_IN_BITMAP (dfs[bb_index], phi_insertion_points,
                                      0, i, bi)
        {
          /* Use a safe push because if there is a definition of VAR
             in every basic block, then WORK_STACK may eventually have
             more than N_BASIC_BLOCK entries.  */
          VEC_safe_push (int, heap, work_stack, i);
          bitmap_set_bit (phi_insertion_points, i);
        }
    }

  VEC_free (int, heap, work_stack);

  return phi_insertion_points;
}

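/* Usage sketch (illustrative, not part of the original file): the
   typical SSA-construction pairing of the two routines above:

     bitmap *dfs = XNEWVEC (bitmap, last_basic_block);
     FOR_EACH_BB (bb)
       dfs[bb->index] = BITMAP_ALLOC (NULL);
     compute_dominance_frontiers (dfs);

     bitmap idf = compute_idf (def_blocks, dfs);
     ... place PHI nodes in the blocks set in IDF ...
     BITMAP_FREE (idf);

   where DEF_BLOCKS is the caller's bitmap of definition blocks.  */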
 
