OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

openrisc/trunk/gnu-old/gcc-4.2.2/gcc/cfgloop.c (rev 841)

/* Natural loop discovery code for GNU compiler.
   Copyright (C) 2000, 2001, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "function.h"
#include "basic-block.h"
#include "toplev.h"
#include "cfgloop.h"
#include "flags.h"
#include "tree.h"
#include "tree-flow.h"

/* Ratio of frequencies of edges such that one of several latch edges is
   considered to belong to an inner loop with the same header.  */
#define HEAVY_EDGE_RATIO 8

#define HEADER_BLOCK(B) (* (int *) (B)->aux)
#define LATCH_EDGE(E) (*(int *) (E)->aux)

static void flow_loops_cfg_dump (const struct loops *, FILE *);
static int flow_loop_level_compute (struct loop *);
static void flow_loops_level_compute (struct loops *);
static void establish_preds (struct loop *);
static void canonicalize_loop_headers (void);
static bool glb_enum_p (basic_block, void *);

/* Dump loop related CFG information.  */

static void
flow_loops_cfg_dump (const struct loops *loops, FILE *file)
{
  int i;
  basic_block bb;

  if (! loops->num || ! file)
    return;

  FOR_EACH_BB (bb)
    {
      edge succ;
      edge_iterator ei;

      fprintf (file, ";; %d succs { ", bb->index);
      FOR_EACH_EDGE (succ, ei, bb->succs)
        fprintf (file, "%d ", succ->dest->index);
      fprintf (file, "}\n");
    }

  /* Dump the DFS node order.  */
  if (loops->cfg.dfs_order)
    {
      fputs (";; DFS order: ", file);
      for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
        fprintf (file, "%d ", loops->cfg.dfs_order[i]);

      fputs ("\n", file);
    }

  /* Dump the reverse completion node order.  */
  if (loops->cfg.rc_order)
    {
      fputs (";; RC order: ", file);
      for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
        fprintf (file, "%d ", loops->cfg.rc_order[i]);

      fputs ("\n", file);
    }
}

/* Return nonzero if the nodes of LOOP are a subset of OUTER.  */

bool
flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
{
  return (loop->depth > outer->depth
          && loop->pred[outer->depth] == outer);
}

/* Returns the superloop of LOOP at the given DEPTH (indexed from zero,
   with the whole-function dummy loop at depth zero).  */

struct loop *
superloop_at_depth (struct loop *loop, unsigned depth)
{
  gcc_assert (depth <= (unsigned) loop->depth);

  if (depth == (unsigned) loop->depth)
    return loop;

  return loop->pred[depth];
}
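
/* The pred array encodes the chain of superloops: for a loop at depth N,
   pred[0] is the root of the loop tree (the whole-function dummy loop)
   and pred[N - 1] is the immediate outer loop, as set up by
   establish_preds below.  A minimal sketch for a loop nested two levels
   deep (the variable name is purely illustrative):

     struct loop *inner = ...;                        -- inner->depth == 2
     superloop_at_depth (inner, 2) == inner           -- the loop itself
     superloop_at_depth (inner, 1) == inner->pred[1]  -- its outer loop
     superloop_at_depth (inner, 0) == inner->pred[0]  -- the tree root

   flow_loop_nested_p above is just a lookup into this array.  */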

/* Dump the loop information specified by LOOP to the stream FILE
   using auxiliary dump callback function LOOP_DUMP_AUX if non null.  */

void
flow_loop_dump (const struct loop *loop, FILE *file,
                void (*loop_dump_aux) (const struct loop *, FILE *, int),
                int verbose)
{
  basic_block *bbs;
  unsigned i;

  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";;  header %d, latch %d\n",
           loop->header->index, loop->latch->index);
  fprintf (file, ";;  depth %d, level %d, outer %ld\n",
           loop->depth, loop->level,
           (long) (loop->outer ? loop->outer->num : -1));

  fprintf (file, ";;  nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}

/* Dump the loop information specified by LOOPS to the stream FILE,
   using auxiliary dump callback function LOOP_DUMP_AUX if non null.  */

void
flow_loops_dump (const struct loops *loops, FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
{
  int i;
  int num_loops;

  num_loops = loops->num;
  if (! num_loops || ! file)
    return;

  fprintf (file, ";; %d loops found\n", num_loops);

  for (i = 0; i < num_loops; i++)
    {
      struct loop *loop = loops->parray[i];

      if (!loop)
        continue;

      flow_loop_dump (loop, file, loop_dump_aux, verbose);
    }

  if (verbose)
    flow_loops_cfg_dump (loops, file);
}

/* Free data allocated for LOOP.  */
void
flow_loop_free (struct loop *loop)
{
  if (loop->pred)
    free (loop->pred);
  free (loop);
}

/* Free all the memory allocated for LOOPS.  */

void
flow_loops_free (struct loops *loops)
{
  if (loops->parray)
    {
      unsigned i;

      gcc_assert (loops->num);

      /* Free the loop descriptors.  */
      for (i = 0; i < loops->num; i++)
        {
          struct loop *loop = loops->parray[i];

          if (!loop)
            continue;

          flow_loop_free (loop);
        }

      free (loops->parray);
      loops->parray = NULL;

      if (loops->cfg.dfs_order)
        free (loops->cfg.dfs_order);
      if (loops->cfg.rc_order)
        free (loops->cfg.rc_order);

    }
}

/* Find the nodes contained within the LOOP with header HEADER.
   Return the number of nodes within the loop.  */

int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
  basic_block *stack;
  int sp;
  int num_nodes = 1;

  header->loop_father = loop;
  header->loop_depth = loop->depth;

  if (loop->latch->loop_father != loop)
    {
      stack = XNEWVEC (basic_block, n_basic_blocks);
      sp = 0;
      num_nodes++;
      stack[sp++] = loop->latch;
      loop->latch->loop_father = loop;
      loop->latch->loop_depth = loop->depth;

      while (sp)
        {
          basic_block node;
          edge e;
          edge_iterator ei;

          node = stack[--sp];

          FOR_EACH_EDGE (e, ei, node->preds)
            {
              basic_block ancestor = e->src;

              if (ancestor != ENTRY_BLOCK_PTR
                  && ancestor->loop_father != loop)
                {
                  ancestor->loop_father = loop;
                  ancestor->loop_depth = loop->depth;
                  num_nodes++;
                  stack[sp++] = ancestor;
                }
            }
        }
      free (stack);
    }
  return num_nodes;
}
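
/* The walk above starts at the latch and follows predecessor edges;
   every block visited this way can reach the latch without passing
   through the header (the header's loop_father is set first, so the
   walk stops there), which is exactly the body of the natural loop.
   The only caller in this file is flow_loops_find, which uses it as

     loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

   after the header and latch of LOOP have been filled in.  */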

/* For each loop in the LOOPS tree that has just a single exit
   record the exit edge.  */

void
mark_single_exit_loops (struct loops *loops)
{
  basic_block bb;
  edge e;
  struct loop *loop;
  unsigned i;

  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (loop)
        loop->single_exit = NULL;
    }

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;
      if (bb->loop_father == loops->tree_root)
        continue;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (e->dest == EXIT_BLOCK_PTR)
            continue;

          if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
            continue;

          for (loop = bb->loop_father;
               loop != e->dest->loop_father;
               loop = loop->outer)
            {
              /* If we have already seen an exit, mark this by the edge that
                 surely does not occur as any exit.  */
              if (loop->single_exit)
                loop->single_exit = single_succ_edge (ENTRY_BLOCK_PTR);
              else
                loop->single_exit = e;
            }
        }
    }

  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (!loop)
        continue;

      if (loop->single_exit == single_succ_edge (ENTRY_BLOCK_PTR))
        loop->single_exit = NULL;
    }

  loops->state |= LOOPS_HAVE_MARKED_SINGLE_EXITS;
}

static void
establish_preds (struct loop *loop)
{
  struct loop *ploop, *father = loop->outer;

  loop->depth = father->depth + 1;

  /* Remember the current loop depth if it is the largest seen so far.  */
  cfun->max_loop_depth = MAX (cfun->max_loop_depth, loop->depth);

  if (loop->pred)
    free (loop->pred);
  loop->pred = XNEWVEC (struct loop *, loop->depth);
  memcpy (loop->pred, father->pred, sizeof (struct loop *) * father->depth);
  loop->pred[father->depth] = father;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    establish_preds (ploop);
}

/* Add LOOP to the loop hierarchy tree where FATHER is father of the
   added loop.  If LOOP has some children, make sure that their
   pred fields are initialized correctly.  */

void
flow_loop_tree_node_add (struct loop *father, struct loop *loop)
{
  loop->next = father->inner;
  father->inner = loop;
  loop->outer = father;

  establish_preds (loop);
}
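
/* A minimal usage sketch (variable names illustrative): to record a newly
   discovered loop below its enclosing loop,

     flow_loop_tree_node_add (outer, new_loop);

   which links new_loop into outer's list of children and, via
   establish_preds, sets new_loop->depth to outer->depth + 1 and rebuilds
   the pred arrays of new_loop and of any loops already nested in it.  */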

/* Remove LOOP from the loop hierarchy tree.  */

void
flow_loop_tree_node_remove (struct loop *loop)
{
  struct loop *prev, *father;

  father = loop->outer;
  loop->outer = NULL;

  /* Remove loop from the list of sons.  */
  if (father->inner == loop)
    father->inner = loop->next;
  else
    {
      for (prev = father->inner; prev->next != loop; prev = prev->next);
      prev->next = loop->next;
    }

  loop->depth = -1;
  free (loop->pred);
  loop->pred = NULL;
}

/* Helper function to compute loop nesting depth and enclosed loop level
   for the natural loop specified by LOOP.  Returns the loop level.  */

static int
flow_loop_level_compute (struct loop *loop)
{
  struct loop *inner;
  int level = 1;

  if (! loop)
    return 0;

  /* Traverse loop tree assigning depth and computing level as the
     maximum level of all the inner loops of this loop.  The loop
     level is equivalent to the height of the loop in the loop tree
     and corresponds to the number of enclosed loop levels (including
     itself).  */
  for (inner = loop->inner; inner; inner = inner->next)
    {
      int ilevel = flow_loop_level_compute (inner) + 1;

      if (ilevel > level)
        level = ilevel;
    }

  loop->level = level;
  return level;
}

/* Compute the loop nesting depth and enclosed loop level for the loop
   hierarchy tree specified by LOOPS.  */

static void
flow_loops_level_compute (struct loops *loops)
{
  flow_loop_level_compute (loops->tree_root);
}

/* A callback to update latch and header info for basic block JUMP created
   by redirecting an edge.  */

static void
update_latch_info (basic_block jump)
{
  alloc_aux_for_block (jump, sizeof (int));
  HEADER_BLOCK (jump) = 0;
  alloc_aux_for_edge (single_pred_edge (jump), sizeof (int));
  LATCH_EDGE (single_pred_edge (jump)) = 0;
  set_immediate_dominator (CDI_DOMINATORS, jump, single_pred (jump));
}

/* A callback for make_forwarder_block, to redirect all edges except for
   MFB_KJ_EDGE to the entry part.  E is the edge for which we decide
   whether to redirect it.  */

static edge mfb_kj_edge;
static bool
mfb_keep_just (edge e)
{
  return e != mfb_kj_edge;
}

/* A callback for make_forwarder_block, to redirect the latch edges into an
   entry part.  E is the edge for which we decide whether to redirect
   it.  */

static bool
mfb_keep_nonlatch (edge e)
{
  return LATCH_EDGE (e);
}

/* Takes care of merging natural loops with shared headers.  */

static void
canonicalize_loop_headers (void)
{
  basic_block header;
  edge e;

  alloc_aux_for_blocks (sizeof (int));
  alloc_aux_for_edges (sizeof (int));

  /* Split blocks so that each loop has only single latch.  */
  FOR_EACH_BB (header)
    {
      edge_iterator ei;
      int num_latches = 0;
      int have_abnormal_edge = 0;

      FOR_EACH_EDGE (e, ei, header->preds)
        {
          basic_block latch = e->src;

          if (e->flags & EDGE_ABNORMAL)
            have_abnormal_edge = 1;

          if (latch != ENTRY_BLOCK_PTR
              && dominated_by_p (CDI_DOMINATORS, latch, header))
            {
              num_latches++;
              LATCH_EDGE (e) = 1;
            }
        }
      if (have_abnormal_edge)
        HEADER_BLOCK (header) = 0;
      else
        HEADER_BLOCK (header) = num_latches;
    }

  if (HEADER_BLOCK (single_succ (ENTRY_BLOCK_PTR)))
    {
      basic_block bb;

      /* We could not redirect edges freely here. On the other hand,
         we can simply split the edge from entry block.  */
      bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));

      alloc_aux_for_edge (single_succ_edge (bb), sizeof (int));
      LATCH_EDGE (single_succ_edge (bb)) = 0;
      alloc_aux_for_block (bb, sizeof (int));
      HEADER_BLOCK (bb) = 0;
    }

  FOR_EACH_BB (header)
    {
      int max_freq, is_heavy;
      edge heavy, tmp_edge;
      edge_iterator ei;

      if (HEADER_BLOCK (header) <= 1)
        continue;

      /* Find a heavy edge.  */
      is_heavy = 1;
      heavy = NULL;
      max_freq = 0;
      FOR_EACH_EDGE (e, ei, header->preds)
        if (LATCH_EDGE (e) &&
            EDGE_FREQUENCY (e) > max_freq)
          max_freq = EDGE_FREQUENCY (e);
      FOR_EACH_EDGE (e, ei, header->preds)
        if (LATCH_EDGE (e) &&
            EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
          {
            if (heavy)
              {
                is_heavy = 0;
                break;
              }
            else
              heavy = e;
          }

      if (is_heavy)
        {
          /* Split out the heavy edge, and create inner loop for it.  */
          mfb_kj_edge = heavy;
          tmp_edge = make_forwarder_block (header, mfb_keep_just,
                                           update_latch_info);
          alloc_aux_for_block (tmp_edge->dest, sizeof (int));
          HEADER_BLOCK (tmp_edge->dest) = 1;
          alloc_aux_for_edge (tmp_edge, sizeof (int));
          LATCH_EDGE (tmp_edge) = 0;
          HEADER_BLOCK (header)--;
        }

      if (HEADER_BLOCK (header) > 1)
        {
          /* Create a new latch block.  */
          tmp_edge = make_forwarder_block (header, mfb_keep_nonlatch,
                                           update_latch_info);
          alloc_aux_for_block (tmp_edge->dest, sizeof (int));
          HEADER_BLOCK (tmp_edge->src) = 0;
          HEADER_BLOCK (tmp_edge->dest) = 1;
          alloc_aux_for_edge (tmp_edge, sizeof (int));
          LATCH_EDGE (tmp_edge) = 1;
        }
    }

  free_aux_for_blocks ();
  free_aux_for_edges ();

#ifdef ENABLE_CHECKING
  verify_dominators (CDI_DOMINATORS);
#endif
}
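
/* The "heavy edge" test above: among the latch edges of a shared header,
   an edge is split into its own inner loop only if it is the single latch
   edge whose frequency is at least max_freq / HEAVY_EDGE_RATIO.  For
   example, with HEAVY_EDGE_RATIO == 8 and two latch edges of frequencies
   800 and 50, only the first passes the threshold 800 / 8 == 100, so it
   gets a forwarder block of its own; with frequencies 800 and 200 both
   pass, no edge is considered heavy, and a single new latch block is
   created for the header instead.  */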

/* Initialize all the parallel_p fields of the loops structure to true.  */

static void
initialize_loops_parallel_p (struct loops *loops)
{
  unsigned int i;

  for (i = 0; i < loops->num; i++)
    {
      struct loop *loop = loops->parray[i];
      loop->parallel_p = true;
    }
}

/* Find all the natural loops in the function and save in LOOPS structure and
   recalculate loop_depth information in basic block structures.
   Return the number of natural loops found.  */

int
flow_loops_find (struct loops *loops)
{
  int b;
  int num_loops;
  edge e;
  sbitmap headers;
  int *dfs_order;
  int *rc_order;
  basic_block header;
  basic_block bb;

  memset (loops, 0, sizeof *loops);

  /* We are going to recount the maximum loop depth,
     so throw away the last count.  */
  cfun->max_loop_depth = 0;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return 0;

  dfs_order = NULL;
  rc_order = NULL;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Join loops with shared headers.  */
  canonicalize_loop_headers ();

  /* Count the number of loop headers.  This should be the
     same as the number of natural loops.  */
  headers = sbitmap_alloc (last_basic_block);
  sbitmap_zero (headers);

  num_loops = 0;
  FOR_EACH_BB (header)
    {
      edge_iterator ei;
      int more_latches = 0;

      header->loop_depth = 0;

      /* If we have an abnormal predecessor, do not consider the
         loop (not worth the problems).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        if (e->flags & EDGE_ABNORMAL)
          break;
      if (e)
        continue;

      FOR_EACH_EDGE (e, ei, header->preds)
        {
          basic_block latch = e->src;

          gcc_assert (!(e->flags & EDGE_ABNORMAL));

          /* Look for back edges where a predecessor is dominated
             by this block.  A natural loop has a single entry
             node (header) that dominates all the nodes in the
             loop.  It also has single back edge to the header
             from a latch node.  */
          if (latch != ENTRY_BLOCK_PTR
              && dominated_by_p (CDI_DOMINATORS, latch, header))
            {
              /* Shared headers should be eliminated by now.  */
              gcc_assert (!more_latches);
              more_latches = 1;
              SET_BIT (headers, header->index);
              num_loops++;
            }
        }
    }

  /* Allocate loop structures.  */
  loops->parray = XCNEWVEC (struct loop *, num_loops + 1);

  /* Dummy loop containing whole function.  */
  loops->parray[0] = XCNEW (struct loop);
  loops->parray[0]->next = NULL;
  loops->parray[0]->inner = NULL;
  loops->parray[0]->outer = NULL;
  loops->parray[0]->depth = 0;
  loops->parray[0]->pred = NULL;
  loops->parray[0]->num_nodes = n_basic_blocks;
  loops->parray[0]->latch = EXIT_BLOCK_PTR;
  loops->parray[0]->header = ENTRY_BLOCK_PTR;
  ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
  EXIT_BLOCK_PTR->loop_father = loops->parray[0];

  loops->tree_root = loops->parray[0];

  /* Find and record information about all the natural loops
     in the CFG.  */
  loops->num = 1;
  FOR_EACH_BB (bb)
    bb->loop_father = loops->tree_root;

  if (num_loops)
    {
      /* Compute depth first search order of the CFG so that outer
         natural loops will be found before inner natural loops.  */
      dfs_order = XNEWVEC (int, n_basic_blocks);
      rc_order = XNEWVEC (int, n_basic_blocks);
      pre_and_rev_post_order_compute (dfs_order, rc_order, false);

      /* Save CFG derived information to avoid recomputing it.  */
      loops->cfg.dfs_order = dfs_order;
      loops->cfg.rc_order = rc_order;

      num_loops = 1;

      for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
        {
          struct loop *loop;
          edge_iterator ei;

          /* Search the nodes of the CFG in reverse completion order
             so that we can find outer loops first.  */
          if (!TEST_BIT (headers, rc_order[b]))
            continue;

          header = BASIC_BLOCK (rc_order[b]);

          loop = loops->parray[num_loops] = XCNEW (struct loop);

          loop->header = header;
          loop->num = num_loops;
          num_loops++;

          /* Look for the latch for this header block.  */
          FOR_EACH_EDGE (e, ei, header->preds)
            {
              basic_block latch = e->src;

              if (latch != ENTRY_BLOCK_PTR
                  && dominated_by_p (CDI_DOMINATORS, latch, header))
                {
                  loop->latch = latch;
                  break;
                }
            }

          flow_loop_tree_node_add (header->loop_father, loop);
          loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
        }

      /* Assign the loop nesting depth and enclosed loop level for each
         loop.  */
      flow_loops_level_compute (loops);

      loops->num = num_loops;
      initialize_loops_parallel_p (loops);
    }

  sbitmap_free (headers);

  loops->state = 0;
#ifdef ENABLE_CHECKING
  verify_flow_info ();
  verify_loop_structure (loops);
#endif

  return loops->num;
}
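
/* A minimal usage sketch of this entry point (illustrative only; the
   consumer name process_loop is hypothetical and real callers live
   elsewhere in GCC):

     struct loops loops;
     unsigned i;

     if (flow_loops_find (&loops) > 1)
       for (i = 1; i < loops.num; i++)
         if (loops.parray[i])
           process_loop (loops.parray[i]);
     flow_loops_free (&loops);

   Index 0 of parray is always the dummy loop spanning the whole
   function, so real natural loops start at index 1.  */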

/* Return nonzero if basic block BB belongs to LOOP.  */
bool
flow_bb_inside_loop_p (const struct loop *loop, const basic_block bb)
{
  struct loop *source_loop;

  if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
    return 0;

  source_loop = bb->loop_father;
  return loop == source_loop || flow_loop_nested_p (loop, source_loop);
}

/* Enumeration predicate for get_loop_body.  */
static bool
glb_enum_p (basic_block bb, void *glb_header)
{
  return bb != (basic_block) glb_header;
}

/* Gets basic blocks of a LOOP.  The header is the 0-th block; the rest are
   in dfs order against the direction of edges, starting from the latch.
   In particular, if header != latch, the latch is the 1-st block.  */
basic_block *
get_loop_body (const struct loop *loop)
{
  basic_block *tovisit, bb;
  unsigned tv = 0;

  gcc_assert (loop->num_nodes);

  tovisit = XCNEWVEC (basic_block, loop->num_nodes);
  tovisit[tv++] = loop->header;

  if (loop->latch == EXIT_BLOCK_PTR)
    {
      /* There may be blocks unreachable from EXIT_BLOCK.  */
      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
      FOR_EACH_BB (bb)
        tovisit[tv++] = bb;
      tovisit[tv++] = EXIT_BLOCK_PTR;
    }
  else if (loop->latch != loop->header)
    {
      tv = dfs_enumerate_from (loop->latch, 1, glb_enum_p,
                               tovisit + 1, loop->num_nodes - 1,
                               loop->header) + 1;
    }

  gcc_assert (tv == loop->num_nodes);
  return tovisit;
}
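
/* Callers own the returned array and must free it, as flow_loop_dump
   above does:

     basic_block *bbs = get_loop_body (loop);
     for (i = 0; i < loop->num_nodes; i++)
       ... bbs[i] ...;
     free (bbs);
*/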

/* Fills dominance descendants inside LOOP of the basic block BB into
   array TOVISIT from index *TV.  */

static void
fill_sons_in_loop (const struct loop *loop, basic_block bb,
                   basic_block *tovisit, int *tv)
{
  basic_block son, postpone = NULL;

  tovisit[(*tv)++] = bb;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
        continue;

      if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
        {
          postpone = son;
          continue;
        }
      fill_sons_in_loop (loop, son, tovisit, tv);
    }

  if (postpone)
    fill_sons_in_loop (loop, postpone, tovisit, tv);
}

/* Gets body of a LOOP (that must be different from the outermost loop)
   sorted by dominance relation.  Additionally, if a basic block s dominates
   the latch, then only blocks dominated by s are after it.  */

basic_block *
get_loop_body_in_dom_order (const struct loop *loop)
{
  basic_block *tovisit;
  int tv;

  gcc_assert (loop->num_nodes);

  tovisit = XCNEWVEC (basic_block, loop->num_nodes);

  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  tv = 0;
  fill_sons_in_loop (loop, loop->header, tovisit, &tv);

  gcc_assert (tv == (int) loop->num_nodes);

  return tovisit;
}

/* Get body of a LOOP in breadth first sort order.  */

basic_block *
get_loop_body_in_bfs_order (const struct loop *loop)
{
  basic_block *blocks;
  basic_block bb;
  bitmap visited;
  unsigned int i = 0;
  unsigned int vc = 1;

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  blocks = XCNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);

  bb = loop->header;
  while (i < loop->num_nodes)
    {
      edge e;
      edge_iterator ei;

      if (!bitmap_bit_p (visited, bb->index))
        {
          /* This basic block is now visited */
          bitmap_set_bit (visited, bb->index);
          blocks[i++] = bb;
        }

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (flow_bb_inside_loop_p (loop, e->dest))
            {
              if (!bitmap_bit_p (visited, e->dest->index))
                {
                  bitmap_set_bit (visited, e->dest->index);
                  blocks[i++] = e->dest;
                }
            }
        }

      gcc_assert (i >= vc);

      bb = blocks[vc++];
    }

  BITMAP_FREE (visited);
  return blocks;
}

/* Gets exit edges of a LOOP, returning their number in NUM_EDGES.  */
edge *
get_loop_exit_edges (const struct loop *loop, unsigned int *num_edges)
{
  edge *edges, e;
  unsigned i, n;
  basic_block * body;
  edge_iterator ei;

  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  body = get_loop_body (loop);
  n = 0;
  for (i = 0; i < loop->num_nodes; i++)
    FOR_EACH_EDGE (e, ei, body[i]->succs)
      if (!flow_bb_inside_loop_p (loop, e->dest))
        n++;
  edges = XNEWVEC (edge, n);
  *num_edges = n;
  n = 0;
  for (i = 0; i < loop->num_nodes; i++)
    FOR_EACH_EDGE (e, ei, body[i]->succs)
      if (!flow_bb_inside_loop_p (loop, e->dest))
        edges[n++] = e;
  free (body);

  return edges;
}

/* Counts the number of conditional branches inside LOOP.  */

unsigned
num_loop_branches (const struct loop *loop)
{
  unsigned i, n;
  basic_block * body;

  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  body = get_loop_body (loop);
  n = 0;
  for (i = 0; i < loop->num_nodes; i++)
    if (EDGE_COUNT (body[i]->succs) >= 2)
      n++;
  free (body);

  return n;
}

/* Adds basic block BB to LOOP.  */
void
add_bb_to_loop (basic_block bb, struct loop *loop)
{
   int i;

   bb->loop_father = loop;
   bb->loop_depth = loop->depth;
   loop->num_nodes++;
   for (i = 0; i < loop->depth; i++)
     loop->pred[i]->num_nodes++;
 }

/* Remove basic block BB from loops.  */
void
remove_bb_from_loops (basic_block bb)
{
   int i;
   struct loop *loop = bb->loop_father;

   loop->num_nodes--;
   for (i = 0; i < loop->depth; i++)
     loop->pred[i]->num_nodes--;
   bb->loop_father = NULL;
   bb->loop_depth = 0;
}

/* Finds nearest common ancestor in loop tree for given loops.  */
struct loop *
find_common_loop (struct loop *loop_s, struct loop *loop_d)
{
  if (!loop_s) return loop_d;
  if (!loop_d) return loop_s;

  if (loop_s->depth < loop_d->depth)
    loop_d = loop_d->pred[loop_s->depth];
  else if (loop_s->depth > loop_d->depth)
    loop_s = loop_s->pred[loop_d->depth];

  while (loop_s != loop_d)
    {
      loop_s = loop_s->outer;
      loop_d = loop_d->outer;
    }
  return loop_s;
}
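
/* Worked example: for LOOP_S at depth 3 and LOOP_D at depth 1, LOOP_S is
   first replaced by loop_s->pred[1], its superloop at depth 1; both
   pointers are then walked up the ->outer chain in lock step until they
   meet, in the worst case at the tree root (the whole-function dummy
   loop), which is an ancestor of every loop.  */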

/* Cancels the LOOP; it must be an innermost one.  */

static void
cancel_loop (struct loops *loops, struct loop *loop)
{
  basic_block *bbs;
  unsigned i;

  gcc_assert (!loop->inner);

  /* Move blocks up one level (they should be removed as soon as possible).  */
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    bbs[i]->loop_father = loop->outer;

  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  */
  loops->parray[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}

/* Cancels LOOP and all its subloops.  */
void
cancel_loop_tree (struct loops *loops, struct loop *loop)
{
  while (loop->inner)
    cancel_loop_tree (loops, loop->inner);
  cancel_loop (loops, loop);
}

/* Checks that LOOPS are all right:
     -- sizes of loops are all right
     -- results of get_loop_body really belong to the loop
     -- loop headers have just a single entry edge and a single latch edge
     -- loop latches have only a single successor, which is the header of their loop
     -- irreducible loops are correctly marked
  */
void
verify_loop_structure (struct loops *loops)
{
  unsigned *sizes, i, j;
  sbitmap irreds;
  basic_block *bbs, bb;
  struct loop *loop;
  int err = 0;
  edge e;

  /* Check sizes.  */
  sizes = XCNEWVEC (unsigned, loops->num);
  sizes[0] = 2;

  FOR_EACH_BB (bb)
    for (loop = bb->loop_father; loop; loop = loop->outer)
      sizes[loop->num]++;

  for (i = 0; i < loops->num; i++)
    {
      if (!loops->parray[i])
        continue;

      if (loops->parray[i]->num_nodes != sizes[i])
        {
          error ("size of loop %d should be %d, not %d",
                   i, sizes[i], loops->parray[i]->num_nodes);
          err = 1;
        }
    }

  /* Check get_loop_body.  */
  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (!loop)
        continue;
      bbs = get_loop_body (loop);

      for (j = 0; j < loop->num_nodes; j++)
        if (!flow_bb_inside_loop_p (loop, bbs[j]))
          {
            error ("bb %d do not belong to loop %d",
                    bbs[j]->index, i);
            err = 1;
          }
      free (bbs);
    }

  /* Check headers and latches.  */
  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (!loop)
        continue;

      if ((loops->state & LOOPS_HAVE_PREHEADERS)
          && EDGE_COUNT (loop->header->preds) != 2)
        {
          error ("loop %d's header does not have exactly 2 entries", i);
          err = 1;
        }
      if (loops->state & LOOPS_HAVE_SIMPLE_LATCHES)
        {
          if (!single_succ_p (loop->latch))
            {
              error ("loop %d's latch does not have exactly 1 successor", i);
              err = 1;
            }
          if (single_succ (loop->latch) != loop->header)
            {
              error ("loop %d's latch does not have header as successor", i);
              err = 1;
            }
          if (loop->latch->loop_father != loop)
            {
              error ("loop %d's latch does not belong directly to it", i);
              err = 1;
            }
        }
      if (loop->header->loop_father != loop)
        {
          error ("loop %d's header does not belong directly to it", i);
          err = 1;
        }
      if ((loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
          && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
        {
          error ("loop %d's latch is marked as part of irreducible region", i);
          err = 1;
        }
    }

  /* Check irreducible loops.  */
  if (loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
    {
      /* Record old info.  */
      irreds = sbitmap_alloc (last_basic_block);
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;
          if (bb->flags & BB_IRREDUCIBLE_LOOP)
            SET_BIT (irreds, bb->index);
          else
            RESET_BIT (irreds, bb->index);
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (e->flags & EDGE_IRREDUCIBLE_LOOP)
              e->flags |= EDGE_ALL_FLAGS + 1;
        }

      /* Recount it.  */
      mark_irreducible_loops (loops);

      /* Compare.  */
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;

          if ((bb->flags & BB_IRREDUCIBLE_LOOP)
              && !TEST_BIT (irreds, bb->index))
            {
              error ("basic block %d should be marked irreducible", bb->index);
              err = 1;
            }
          else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
              && TEST_BIT (irreds, bb->index))
            {
              error ("basic block %d should not be marked irreducible", bb->index);
              err = 1;
            }
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
                  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
                {
                  error ("edge from %d to %d should be marked irreducible",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
                       && (e->flags & (EDGE_ALL_FLAGS + 1)))
                {
                  error ("edge from %d to %d should not be marked irreducible",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              e->flags &= ~(EDGE_ALL_FLAGS + 1);
            }
        }
      free (irreds);
    }

  /* Check the single_exit.  */
  if (loops->state & LOOPS_HAVE_MARKED_SINGLE_EXITS)
    {
      memset (sizes, 0, sizeof (unsigned) * loops->num);
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;
          if (bb->loop_father == loops->tree_root)
            continue;
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (e->dest == EXIT_BLOCK_PTR)
                continue;

              if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
                continue;

              for (loop = bb->loop_father;
                   loop != e->dest->loop_father;
                   loop = loop->outer)
                {
                  sizes[loop->num]++;
                  if (loop->single_exit
                      && loop->single_exit != e)
                    {
                      error ("wrong single exit %d->%d recorded for loop %d",
                             loop->single_exit->src->index,
                             loop->single_exit->dest->index,
                             loop->num);
                      error ("right exit is %d->%d",
                             e->src->index, e->dest->index);
                      err = 1;
                    }
                }
            }
        }

      for (i = 1; i < loops->num; i++)
        {
          loop = loops->parray[i];
          if (!loop)
            continue;

          if (sizes[i] == 1
              && !loop->single_exit)
            {
              error ("single exit not recorded for loop %d", loop->num);
              err = 1;
            }

          if (sizes[i] != 1
              && loop->single_exit)
            {
              error ("loop %d should not have single exit (%d -> %d)",
                     loop->num,
                     loop->single_exit->src->index,
                     loop->single_exit->dest->index);
              err = 1;
            }
        }
    }

  gcc_assert (!err);

  free (sizes);
}

/* Returns latch edge of LOOP.  */
edge
loop_latch_edge (const struct loop *loop)
{
  return find_edge (loop->latch, loop->header);
}

/* Returns preheader edge of LOOP.  */
edge
loop_preheader_edge (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->src != loop->latch)
      break;

  return e;
}

/* Returns true if E is an exit of LOOP.  */

bool
loop_exit_edge_p (const struct loop *loop, edge e)
{
  return (flow_bb_inside_loop_p (loop, e->src)
          && !flow_bb_inside_loop_p (loop, e->dest));
}
