/* CFG cleanup for trees.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "function.h"
#include "ggc.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (VEC(edge,gc) *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            tree lhs = gimple_cond_lhs (stmt);
            tree rhs = gimple_cond_rhs (stmt);
            /* For conditions, try harder and look up single-argument
               PHI nodes.  Only do so within the same basic block, though,
               as other basic blocks may already be dead.  */
            if (TREE_CODE (lhs) == SSA_NAME
                && !name_registered_for_update_p (lhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (lhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  lhs = PHI_ARG_DEF (def_stmt, 0);
              }
            if (TREE_CODE (rhs) == SSA_NAME
                && !name_registered_for_update_p (rhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  rhs = PHI_ARG_DEF (def_stmt, 0);
              }
            val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                   boolean_type_node, lhs, rhs);
            break;
          }

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
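      /* For instance, the GNU C computed goto `goto *&&lab;' has the
         ADDR_EXPR of the LABEL_DECL `lab' as its destination, so control
         provably transfers to `lab' and the goto is redundant.
         (`lab' is an illustrative name, not one from this file.)  */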
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block, and if we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to
   ENTRY_BLOCK_PTR.  */

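/* For example, an empty block created by critical-edge splitting that
   merely falls through to its single successor is a forwarder block
   (the PHI_WANTED == false case).  This is an illustrative sketch; the
   checks below are authoritative.  */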
static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR);

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels;
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

      if (dest->loop_father->header == dest)
        return false;
    }
  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Fix up the CFG
   and remove the LHS.  Return true if something changed.  */

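/* Illustrative sketch: once `x_1 = foo ();' is discovered noreturn,
   everything after the call is unreachable, so the block is split after
   the call, the fallthru edge is removed, and the dead LHS x_1 is
   released (`x_1' and `foo' are hypothetical names, not from this
   file).  */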
bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split the basic block if STMT is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    split_block (bb, stmt);

  changed |= remove_fallthru_edge (bb->succs);

  /* If there is an LHS, remove it.  */
  if (gimple_call_lhs (stmt))
    {
      tree op = gimple_call_lhs (stmt);
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to remove the SSA name to avoid checking errors.
         All uses are dominated by the noreturn call and thus will
         be removed afterwards.
         We proactively remove affected non-PHI statements to keep
         fixup_cfg from trying to update them and crashing.  */
      if (TREE_CODE (op) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          gimple use_stmt;
          bitmap_iterator bi;
          unsigned int bb_index;

          bitmap blocks = BITMAP_ALLOC (NULL);

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (gimple_code (use_stmt) != GIMPLE_PHI)
                bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, error_mark_node);
            }
          EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
            delete_basic_block (BASIC_BLOCK (bb_index));
          BITMAP_FREE (blocks);
          release_ssa_name (op);
        }
      update_stmt (stmt);
      changed = true;
    }
  /* Similarly, remove the VDEF if there is any.  */
  else if (gimple_vdef (stmt))
    update_stmt (stmt);
  return changed;
}


/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
        bb = gimple_bb (stmt);
        /* BB might be deleted at this point, so first verify that
           BB is present in the cfg.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= last_basic_block
            || BASIC_BLOCK (bb->index) != bb
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed |= fixup_noreturn_call (stmt);
      }

  return changed;
}

/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it.  */

static bool
cleanup_omp_return (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  basic_block control_bb;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_OMP_RETURN
      || !single_pred_p (bb))
    return false;

  control_bb = single_pred (bb);
  stmt = last_stmt (control_bb);

  if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
    return false;

  /* The block with the control statement normally has two entry edges -- one
     from entry, one from continue.  If continue is removed, return is
     unreachable, so we remove it here as well.  */
  if (EDGE_COUNT (control_bb->preds) == 2)
    return false;

  gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
  remove_edge_and_dominated_blocks (single_pred_edge (bb));
  return true;
}

/* Tries to clean up the cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = false;

  if (cleanup_omp_return (bb))
    return true;

  retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the cfg cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of the edge to CASE_LABEL_EXPR mapping.  */

  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any
         noreturn calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the cfg that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable via back edges from the latch).  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  loops_state_clear (LOOPS_NEED_FIXUP);
  timevar_pop (TV_REPAIR_LOOPS);
}

/* Clean up the cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Merge the PHI nodes at BB into those at BB's sole successor.  */

static void
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              FOR_EACH_VEC_ELT (edge_var_map, head, i, vm)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  while (current != worklist)
    {
      bb = *--current;
      remove_forwarder_block_with_phi (bb);
    }

  free (worklist);
  return 0;
}

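/* Gate for pass_merge_phi: the pass is always enabled.  */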
static bool
gate_merge_phi (void)
{
  return 1;
}

struct gimple_opt_pass pass_merge_phi =
{
 {
  GIMPLE_PASS,
  "mergephi",                   /* name */
  gate_merge_phi,               /* gate */
  merge_phi_nodes,              /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_TREE_MERGE_PHI,            /* tv_id */
  PROP_cfg | PROP_ssa,          /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_ggc_collect              /* todo_flags_finish */
    | TODO_verify_ssa
 }
};
