/* Control flow graph analysis code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains various simple utilities to analyze the CFG.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "toplev.h"
#include "tm_p.h"
#include "timevar.h"

/* Store the data structures necessary for depth-first search.  */
struct depth_first_search_dsS {
  /* stack for backtracking during the algorithm */
  basic_block *stack;

  /* number of blocks on the stack.  That is, positions 0, ..., sp-1
     are occupied.  */
  unsigned int sp;

  /* record of basic blocks already seen by depth-first search */
  sbitmap visited_blocks;
};
typedef struct depth_first_search_dsS *depth_first_search_ds;

static void flow_dfs_compute_reverse_init (depth_first_search_ds);
static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds,
                                             basic_block);
static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds,
                                                     basic_block);
static void flow_dfs_compute_reverse_finish (depth_first_search_ds);
static bool flow_active_insn_p (rtx);
 
/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (rtx insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
forwarder_block_p (basic_block bb)
{
  rtx insn;

  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Return true if we can reach TARGET from SRC by falling through.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx insn = BB_END (src);
  rtx insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  if (src->next_bb != target)
    return false;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (insn2 && !active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  /* ??? Later we may add code to move jump tables offline.  */
  return next_active_insn (insn) == insn2;
}

/* Return true if we could reach TARGET from SRC by falling through,
   if TARGET were made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Mark the back edges in DFS traversal.
   Return true if a loop (natural or otherwise) is present.
   Inspired by Depth_First_Search_PP described in:

     Advanced Compiler Design and Implementation
     Steven Muchnick
     Morgan Kaufmann, 1997

   and heavily borrowed from flow_depth_first_order_compute.  */

bool
mark_dfs_back_edges (void)
{
  edge_iterator *stack;
  int *pre;
  int *post;
  int sp;
  int prenum = 1;
  int postnum = 1;
  sbitmap visited;
  bool found = false;

  /* Allocate the preorder and postorder number arrays.  */
  pre = xcalloc (last_basic_block, sizeof (int));
  post = xcalloc (last_basic_block, sizeof (int));

  /* Allocate stack for back-tracking up CFG.  */
  stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;
      ei_edge (ei)->flags &= ~EDGE_DFS_BACK;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          pre[dest->index] = prenum++;
          if (EDGE_COUNT (dest->succs) > 0)
            {
              /* Since the DEST node has been visited for the first
                 time, check its successors.  */
              stack[sp++] = ei_start (dest->succs);
            }
          else
            post[dest->index] = postnum++;
        }
      else
        {
          if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
              && pre[src->index] >= pre[dest->index]
              && post[dest->index] == 0)
            ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;

          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            post[src->index] = postnum++;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (pre);
  free (post);
  free (stack);
  sbitmap_free (visited);

  return found;
}
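
/* Usage sketch (illustrative; `handle_back_edge' is a stand-in for
   caller-specific work, not a GCC function): a pass that needs loop
   back edges runs the marker once and then tests EDGE_DFS_BACK.

     basic_block bb;
     edge e;
     edge_iterator ei;
     bool has_loop = mark_dfs_back_edges ();

     FOR_EACH_BB (bb)
       FOR_EACH_EDGE (e, ei, bb->succs)
         if (e->flags & EDGE_DFS_BACK)
           handle_back_edge (e);  */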
 
/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru.  */

void
set_edge_can_fallthru_flag (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          e->flags &= ~EDGE_CAN_FALLTHRU;

          /* The FALLTHRU edge is also a CAN_FALLTHRU edge.  */
          if (e->flags & EDGE_FALLTHRU)
            e->flags |= EDGE_CAN_FALLTHRU;
        }

      /* If the BB ends with an invertible condjump, both of its edges
         are CAN_FALLTHRU edges.  */
      if (EDGE_COUNT (bb->succs) != 2)
        continue;
      if (!any_condjump_p (BB_END (bb)))
        continue;
      if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0))
        continue;
      /* The jump proved invertible; invert it back to restore the
         original sense.  */
      invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0);
      EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
      EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
    }
}

/* Find unreachable blocks.  An unreachable block will have its
   BB_REACHABLE bit cleared in block->flags; a set bit indicates the
   block is reachable.  */

void
find_unreachable_blocks (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);

  /* Clear all the reachability flags.  */

  FOR_EACH_BB (bb)
    bb->flags &= ~BB_REACHABLE;

  /* Add our starting points to the worklist.  Almost always there will
     be only one.  It isn't inconceivable that we might one day directly
     support Fortran alternate entry points.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      *tos++ = e->dest;

      /* Mark the block reachable.  */
      e->dest->flags |= BB_REACHABLE;
    }

  /* Iterate: find everything reachable from what we've already seen.  */

  while (tos != worklist)
    {
      basic_block b = *--tos;

      FOR_EACH_EDGE (e, ei, b->succs)
        {
          basic_block dest = e->dest;

          if (!(dest->flags & BB_REACHABLE))
            {
              *tos++ = dest;
              dest->flags |= BB_REACHABLE;
            }
        }
    }

  free (worklist);
}
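
/* Usage sketch (illustrative): this is how a cleanup pass can delete
   dead blocks once the flags are computed; compare
   delete_unreachable_blocks in cfgcleanup.c.

     basic_block b, next;

     find_unreachable_blocks ();
     for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next)
       {
         next = b->next_bb;
         if (!(b->flags & BB_REACHABLE))
           delete_basic_block (b);
       }  */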
 
/* Functions to access an edge list with a vector representation.
   Enough data is kept such that given an index number, the
   pred and succ that edge represents can be determined, or
   given a pred and a succ, its index number can be returned.
   This allows algorithms which consume a lot of memory to
   represent the normally full matrix of edge (pred,succ) with a
   single indexed vector, edge (EDGE_INDEX (pred, succ)), with no
   wasted space in the client code due to sparse flow graphs.  */
 
/* This function initializes the edge list.  Basically the entire
   flow graph is processed: all edges are assigned a number, and the
   data structure is filled in.  */

struct edge_list *
create_edge_list (void)
{
  struct edge_list *elist;
  edge e;
  int num_edges;
  int block_count;
  basic_block bb;
  edge_iterator ei;

  block_count = n_basic_blocks + 2;   /* Include the entry and exit blocks.  */

  num_edges = 0;

  /* Determine the number of edges in the flow graph by counting successor
     edges on each basic block.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      num_edges += EDGE_COUNT (bb->succs);
    }

  elist = xmalloc (sizeof (struct edge_list));
  elist->num_blocks = block_count;
  elist->num_edges = num_edges;
  elist->index_to_edge = xmalloc (sizeof (edge) * num_edges);

  num_edges = 0;

  /* Follow successors of blocks, and register these edges.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      elist->index_to_edge[num_edges++] = e;

  return elist;
}

/* This function frees the memory associated with an edge list.  */

void
free_edge_list (struct edge_list *elist)
{
  if (elist)
    {
      free (elist->index_to_edge);
      free (elist);
    }
}
 
/* This function provides debug output showing an edge list.  */

void
print_edge_list (FILE *f, struct edge_list *elist)
{
  int x;

  fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
           elist->num_blocks - 2, elist->num_edges);

  for (x = 0; x < elist->num_edges; x++)
    {
      fprintf (f, " %-4d - edge(", x);
      if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
        fprintf (f, "entry,");
      else
        fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);

      if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
        fprintf (f, "exit)\n");
      else
        fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
    }
}

/* This function provides an internal consistency check of an edge list,
   verifying that all edges are present, and that there are no
   extra edges.  */

void
verify_edge_list (FILE *f, struct edge_list *elist)
{
  int pred, succ, index;
  edge e;
  basic_block bb, p, s;
  edge_iterator ei;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          pred = e->src->index;
          succ = e->dest->index;
          index = EDGE_INDEX (elist, e->src, e->dest);
          if (index == EDGE_INDEX_NO_EDGE)
            {
              fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
              continue;
            }

          if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
            fprintf (f, "*p* Pred for index %d should be %d not %d\n",
                     index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
          if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
            fprintf (f, "*p* Succ for index %d should be %d not %d\n",
                     index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
        }
    }

  /* We've verified that all the edges are in the list, now let's make sure
     there are no spurious edges in the list.  */

  FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
      {
        int found_edge = 0;

        FOR_EACH_EDGE (e, ei, p->succs)
          if (e->dest == s)
            {
              found_edge = 1;
              break;
            }

        FOR_EACH_EDGE (e, ei, s->preds)
          if (e->src == p)
            {
              found_edge = 1;
              break;
            }

        if (EDGE_INDEX (elist, p, s)
            == EDGE_INDEX_NO_EDGE && found_edge != 0)
          fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
                   p->index, s->index);
        if (EDGE_INDEX (elist, p, s)
            != EDGE_INDEX_NO_EDGE && found_edge == 0)
          fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
                   p->index, s->index, EDGE_INDEX (elist, p, s));
      }
}

/* Given PRED and SUCC blocks, return the edge which connects the blocks.
   If no such edge exists, return NULL.  */

edge
find_edge (basic_block pred, basic_block succ)
{
  edge e;
  edge_iterator ei;

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      FOR_EACH_EDGE (e, ei, pred->succs)
        if (e->dest == succ)
          return e;
    }
  else
    {
      FOR_EACH_EDGE (e, ei, succ->preds)
        if (e->src == pred)
          return e;
    }

  return NULL;
}

/* Return the index of the edge, if any, between a specified
   predecessor and successor, or EDGE_INDEX_NO_EDGE if there is
   no such edge.  */

int
find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ)
{
  int x;

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    if (INDEX_EDGE_PRED_BB (edge_list, x) == pred
        && INDEX_EDGE_SUCC_BB (edge_list, x) == succ)
      return x;

  return (EDGE_INDEX_NO_EDGE);
}
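
/* Usage sketch (illustrative; `process_edge' is a stand-in for
   caller-specific work, and BB1/BB2 for any two basic blocks): a
   typical client builds the list once, translates block pairs to
   indices while it runs, and frees the list when done.

     struct edge_list *elist = create_edge_list ();
     int index = EDGE_INDEX (elist, bb1, bb2);

     if (index != EDGE_INDEX_NO_EDGE)
       process_edge (INDEX_EDGE (elist, index));
     free_edge_list (elist);  */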
 
/* Dump the list of basic blocks in the bitmap NODES.  */

void
flow_nodes_print (const char *str, const sbitmap nodes, FILE *file)
{
  unsigned int node = 0;
  sbitmap_iterator sbi;

  if (! nodes)
    return;

  fprintf (file, "%s { ", str);
  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, sbi)
    fprintf (file, "%d ", node);
  fputs ("}\n", file);
}

/* Dump the list of edges in the array EDGE_LIST.  */

void
flow_edge_list_print (const char *str, const edge *edge_list, int num_edges, FILE *file)
{
  int i;

  if (! edge_list)
    return;

  fprintf (file, "%s { ", str);
  for (i = 0; i < num_edges; i++)
    fprintf (file, "%d->%d ", edge_list[i]->src->index,
             edge_list[i]->dest->index);

  fputs ("}\n", file);
}

/* This routine will remove any fake predecessor edges for a basic block.
   When the edge is removed, it is also removed from whatever successor
   list it is in.  */

static void
remove_fake_predecessors (basic_block bb)
{
  edge e;
  edge_iterator ei;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      if ((e->flags & EDGE_FAKE) == EDGE_FAKE)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* This routine will remove all fake edges from the flow graph.  If
   we remove all fake successors, it will automatically remove all
   fake predecessors.  */

void
remove_fake_edges (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
    remove_fake_predecessors (bb);
}

/* This routine will remove all fake edges to the EXIT_BLOCK.  */

void
remove_fake_exit_edges (void)
{
  remove_fake_predecessors (EXIT_BLOCK_PTR);
}

/* This function will add a fake edge from any block which has no
   successors to the exit block.  Some data flow equations require
   these edges to exist.  */

void
add_noreturn_fake_exit_edges (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    if (EDGE_COUNT (bb->succs) == 0)
      make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}

/* This function adds a fake edge from each infinite loop to the
   exit block.  Some optimizations require a path from each node to
   the exit node.

   See also Morgan, Figure 3.10, pp. 82-83.

   The current implementation is ugly, not attempting to minimize the
   number of inserted fake edges.  To reduce the number of fake edges
   to insert, add fake edges from _innermost_ loops containing only
   nodes not reachable from the exit block.  */

void
connect_infinite_loops_to_exit (void)
{
  basic_block unvisited_block = EXIT_BLOCK_PTR;
  struct depth_first_search_dsS dfs_ds;

  /* Perform depth-first search in the reverse graph to find nodes
     reachable from the exit block.  */
  flow_dfs_compute_reverse_init (&dfs_ds);
  flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);

  /* Repeatedly add fake edges, updating the unreachable nodes.  */
  while (1)
    {
      unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds,
                                                          unvisited_block);
      if (!unvisited_block)
        break;

      make_edge (unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE);
      flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block);
    }

  flow_dfs_compute_reverse_finish (&dfs_ds);
}
 
/* Compute reverse top sort order.  */

void
flow_reverse_top_sort_order_compute (int *rts_order)
{
  edge_iterator *stack;
  int sp;
  int postnum = 0;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else
            rts_order[postnum++] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
            rts_order[postnum++] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (stack);
  sbitmap_free (visited);
}

/* Compute the depth first search order and store it in the array
   DFS_ORDER if nonzero.  If RC_ORDER is nonzero, store the reverse
   completion number for each node in it.  Returns the number of nodes
   visited.  A depth first search tries to get as far away from the
   starting point as quickly as possible.  */

int
flow_depth_first_order_compute (int *dfs_order, int *rc_order)
{
  edge_iterator *stack;
  int sp;
  int dfsnum = 0;
  int rcnum = n_basic_blocks - 1;
  sbitmap visited;

  /* Allocate stack for back-tracking up CFG.  */
  stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (last_basic_block);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      ei = stack[sp - 1];
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          if (dfs_order)
            dfs_order[dfsnum] = dest->index;

          dfsnum++;

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack[sp++] = ei_start (dest->succs);
          else if (rc_order)
            /* There are no successors for the DEST node so assign
               its reverse completion number.  */
            rc_order[rcnum--] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR
              && rc_order)
            /* There are no more successors for the SRC node
               so assign its reverse completion number.  */
            rc_order[rcnum--] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack[sp - 1]);
          else
            sp--;
        }
    }

  free (stack);
  sbitmap_free (visited);

  /* The number of nodes visited should be the number of blocks.  */
  gcc_assert (dfsnum == n_basic_blocks);

  return dfsnum;
}
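
/* Usage sketch (illustrative; `process_block' is a stand-in for
   caller-specific work): many dataflow solvers visit blocks in
   reverse completion order, which sees a block before its successors
   along any non-back edge.

     int i;
     int *rc_order = xmalloc (n_basic_blocks * sizeof (int));

     flow_depth_first_order_compute (NULL, rc_order);
     for (i = 0; i < n_basic_blocks; i++)
       process_block (BASIC_BLOCK (rc_order[i]));
     free (rc_order);  */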
 
/* Perform a depth-first search on the _reverse_ graph, marking the
   nodes visited in the VISITED_BLOCKS bitmap of the search context.

   The computation is split into four pieces:

   flow_dfs_compute_reverse_init () creates the necessary data
   structures.

   flow_dfs_compute_reverse_add_bb () adds a basic block to the data
   structures.  The block will start the search.

   flow_dfs_compute_reverse_execute () continues (or starts) the
   search using the block on the top of the stack, stopping when the
   stack is empty.

   flow_dfs_compute_reverse_finish () destroys the necessary data
   structures.

   Thus, the user will probably call ..._init(), call ..._add_bb() to
   add a beginning basic block to the stack, call ..._execute(),
   possibly add another bb to the stack and again call ..._execute(),
   ..., and finally call _finish().  */

/* Initialize the data structures used for depth-first search on the
   reverse graph.  DATA is the current depth-first search context.  */

static void
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
  /* Allocate stack for back-tracking up CFG.  */
  data->stack = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1))
                         * sizeof (basic_block));
  data->sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (data->visited_blocks);
}
 
/* Add the specified basic block to the top of the dfs data
   structures.  When the search continues, it will start at the
   block.  */

static void
flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb)
{
  data->stack[data->sp++] = bb;
  SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1));
}

/* Continue the depth-first search through the reverse graph starting with the
   block at the stack's top and ending when the stack is empty.  Visited nodes
   are marked.  Returns an unvisited basic block, or NULL if there is none
   available.  */

static basic_block
flow_dfs_compute_reverse_execute (depth_first_search_ds data,
                                  basic_block last_unvisited)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  while (data->sp > 0)
    {
      bb = data->stack[--data->sp];

      /* Perform depth-first search on adjacent vertices.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!TEST_BIT (data->visited_blocks,
                       e->src->index - (INVALID_BLOCK + 1)))
          flow_dfs_compute_reverse_add_bb (data, e->src);
    }

  /* Determine if there are unvisited basic blocks.  */
  FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb)
    if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)))
      return bb;

  return NULL;
}

/* Destroy the data structures needed for depth-first search on the
   reverse graph.  */

static void
flow_dfs_compute_reverse_finish (depth_first_search_ds data)
{
  free (data->stack);
  sbitmap_free (data->visited_blocks);
}
 
/* Perform a dfs search from BB over vertices satisfying PREDICATE;
   if REVERSE is nonzero, go against the direction of the edges.
   DATA is passed to PREDICATE.  Return the number of blocks found,
   storing their list in RSLT; RSLT can contain at most RSLT_MAX items.  */
int
dfs_enumerate_from (basic_block bb, int reverse,
                    bool (*predicate) (basic_block, void *),
                    basic_block *rslt, int rslt_max, void *data)
{
  basic_block *st, lbb;
  int sp = 0, tv = 0;
  unsigned size;

  /* A bitmap to keep track of visited blocks.  Allocating it each time
     this function is called is not possible, since dfs_enumerate_from
     is often used on small (almost) disjoint parts of cfg (bodies of
     loops), and allocating a large sbitmap would lead to quadratic
     behavior.  */
  static sbitmap visited;
  static unsigned v_size;

#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index + 2))
#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index + 2))
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index + 2))

  /* Resize the VISITED sbitmap if necessary.  */
  size = last_basic_block + 2;
  if (size < 10)
    size = 10;

  if (!visited)
    {
      visited = sbitmap_alloc (size);
      sbitmap_zero (visited);
      v_size = size;
    }
  else if (v_size < size)
    {
      /* Ensure that we increase the size of the sbitmap exponentially.  */
      if (2 * v_size > size)
        size = 2 * v_size;

      visited = sbitmap_resize (visited, size, 0);
      v_size = size;
    }

  st = xcalloc (rslt_max, sizeof (basic_block));
  rslt[tv++] = st[sp++] = bb;
  MARK_VISITED (bb);
  while (sp)
    {
      edge e;
      edge_iterator ei;
      lbb = st[--sp];
      if (reverse)
        {
          FOR_EACH_EDGE (e, ei, lbb->preds)
            if (!VISITED_P (e->src) && predicate (e->src, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->src;
                MARK_VISITED (e->src);
              }
        }
      else
        {
          FOR_EACH_EDGE (e, ei, lbb->succs)
            if (!VISITED_P (e->dest) && predicate (e->dest, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->dest;
                MARK_VISITED (e->dest);
              }
        }
    }
  free (st);
  for (sp = 0; sp < tv; sp++)
    UNMARK_VISITED (rslt[sp]);
  return tv;
#undef MARK_VISITED
#undef UNMARK_VISITED
#undef VISITED_P
}
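
/* Usage sketch (illustrative; `region_p' is a hypothetical predicate
   with the PREDICATE signature above): collect at most 100 blocks
   forward-reachable from BB that satisfy the predicate.  Passing a
   nonzero REVERSE would walk predecessor edges instead.

     basic_block blocks[100];
     int n = dfs_enumerate_from (bb, 0, region_p, blocks, 100, NULL);  */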
 
/* Compute dominance frontiers, ala Harvey, Ferrante, et al.

   This algorithm can be found in Timothy Harvey's PhD thesis, at
   http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
   dominance algorithms.

   First, we identify each join point, j (any node with more than one
   incoming edge is a join point).

   We then examine each predecessor, p, of j and walk up the dominator tree
   starting at p.

   We stop the walk when we reach j's immediate dominator - j is in the
   dominance frontier of each of the nodes in the walk, except for j's
   immediate dominator.  Intuitively, all of the rest of j's dominators are
   shared by j's predecessors as well.
   Since they dominate j, they will not have j in their dominance frontiers.

   The number of nodes touched by this algorithm is equal to the size
   of the dominance frontiers, no more, no less.  */
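
/* Worked example (illustrative).  In the diamond CFG

       A
      / \
     B   C
      \ /
       D

   D is the only join point, and its immediate dominator is A.  The
   walk up from predecessor B stops at A and puts D into B's frontier;
   the walk from C likewise puts D into C's frontier.  A dominates D,
   so neither A nor D gets anything in its own frontier.  */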
 
static void
compute_dominance_frontiers_1 (bitmap *frontiers)
{
  edge p;
  edge_iterator ei;
  basic_block b;
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) >= 2)
        {
          FOR_EACH_EDGE (p, ei, b->preds)
            {
              basic_block runner = p->src;
              basic_block domsb;
              if (runner == ENTRY_BLOCK_PTR)
                continue;

              domsb = get_immediate_dominator (CDI_DOMINATORS, b);
              while (runner != domsb)
                {
                  bitmap_set_bit (frontiers[runner->index],
                                  b->index);
                  runner = get_immediate_dominator (CDI_DOMINATORS,
                                                    runner);
                }
            }
        }
    }
}

void
compute_dominance_frontiers (bitmap *frontiers)
{
  timevar_push (TV_DOM_FRONTIERS);

  compute_dominance_frontiers_1 (frontiers);

  timevar_pop (TV_DOM_FRONTIERS);
}
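
/* Usage sketch (illustrative): a caller supplies one bitmap per basic
   block, indexed by block number, with dominator information already
   computed; compare the SSA construction code in tree-into-ssa.c.

     bitmap *frontiers;
     basic_block bb;

     calculate_dominance_info (CDI_DOMINATORS);
     frontiers = xmalloc (last_basic_block * sizeof (bitmap));
     FOR_EACH_BB (bb)
       frontiers[bb->index] = BITMAP_ALLOC (NULL);
     compute_dominance_frontiers (frontiers);  */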
 
