OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.2.2/] [gcc/] [cfgcleanup.c] - Blame information for rev 634

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/* Control flow optimization code for GNU compiler.
2
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
4
 
5
This file is part of GCC.
6
 
7
GCC is free software; you can redistribute it and/or modify it under
8
the terms of the GNU General Public License as published by the Free
9
Software Foundation; either version 3, or (at your option) any later
10
version.
11
 
12
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13
WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
for more details.
16
 
17
You should have received a copy of the GNU General Public License
18
along with GCC; see the file COPYING3.  If not see
19
<http://www.gnu.org/licenses/>.  */
20
 
21
/* This file contains optimizer of the control flow.  The main entry point is
22
   cleanup_cfg.  Following optimizations are performed:
23
 
24
   - Unreachable blocks removal
25
   - Edge forwarding (edge to the forwarder block is forwarded to its
26
     successor.  Simplification of the branch instruction is performed by
27
     underlying infrastructure so branch can be converted to simplejump or
28
     eliminated).
29
   - Cross jumping (tail merging)
30
   - Conditional jump-around-simplejump simplification
31
   - Basic block merging.  */
32
 
33
#include "config.h"
34
#include "system.h"
35
#include "coretypes.h"
36
#include "tm.h"
37
#include "rtl.h"
38
#include "hard-reg-set.h"
39
#include "regs.h"
40
#include "timevar.h"
41
#include "output.h"
42
#include "insn-config.h"
43
#include "flags.h"
44
#include "recog.h"
45
#include "toplev.h"
46
#include "cselib.h"
47
#include "params.h"
48
#include "tm_p.h"
49
#include "target.h"
50
#include "cfglayout.h"
51
#include "emit-rtl.h"
52
#include "tree-pass.h"
53
#include "cfgloop.h"
54
#include "expr.h"
55
 
56
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
57
 
58
/* Set to true when we are running first pass of try_optimize_cfg loop.  */
59
static bool first_pass;
60
static bool try_crossjump_to_edge (int, edge, edge);
61
static bool try_crossjump_bb (int, basic_block);
62
static bool outgoing_edges_match (int, basic_block, basic_block);
63
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
64
static bool old_insns_match_p (int, rtx, rtx);
65
 
66
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
67
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
68
static bool try_optimize_cfg (int);
69
static bool try_simplify_condjump (basic_block);
70
static bool try_forward_edges (int, basic_block);
71
static edge thread_jump (int, edge, basic_block);
72
static bool mark_effect (rtx, bitmap);
73
static void notice_new_block (basic_block);
74
static void update_forwarder_flag (basic_block);
75
static int mentions_nonequal_regs (rtx *, void *);
76
static void merge_memattrs (rtx, rtx);
77
 
78
/* Set flags for newly created block.  */
79
 
80
static void
81
notice_new_block (basic_block bb)
82
{
83
  if (!bb)
84
    return;
85
 
86
  if (forwarder_block_p (bb))
87
    bb->flags |= BB_FORWARDER_BLOCK;
88
}
89
 
90
/* Recompute forwarder flag after block has been modified.  */
91
 
92
static void
93
update_forwarder_flag (basic_block bb)
94
{
95
  if (forwarder_block_p (bb))
96
    bb->flags |= BB_FORWARDER_BLOCK;
97
  else
98
    bb->flags &= ~BB_FORWARDER_BLOCK;
99
}
100
 
101
/* Simplify a conditional jump around an unconditional jump.
102
   Return true if something changed.  */
103
 
104
static bool
105
try_simplify_condjump (basic_block cbranch_block)
106
{
107
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
108
  edge cbranch_jump_edge, cbranch_fallthru_edge;
109
  rtx cbranch_insn;
110
 
111
  /* Verify that there are exactly two successors.  */
112
  if (EDGE_COUNT (cbranch_block->succs) != 2)
113
    return false;
114
 
115
  /* Verify that we've got a normal conditional branch at the end
116
     of the block.  */
117
  cbranch_insn = BB_END (cbranch_block);
118
  if (!any_condjump_p (cbranch_insn))
119
    return false;
120
 
121
  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
122
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
123
 
124
  /* The next block must not have multiple predecessors, must not
125
     be the last block in the function, and must contain just the
126
     unconditional jump.  */
127
  jump_block = cbranch_fallthru_edge->dest;
128
  if (!single_pred_p (jump_block)
129
      || jump_block->next_bb == EXIT_BLOCK_PTR
130
      || !FORWARDER_BLOCK_P (jump_block))
131
    return false;
132
  jump_dest_block = single_succ (jump_block);
133
 
134
  /* If we are partitioning hot/cold basic blocks, we don't want to
135
     mess up unconditional or indirect jumps that cross between hot
136
     and cold sections.
137
 
138
     Basic block partitioning may result in some jumps that appear to
139
     be optimizable (or blocks that appear to be mergeable), but which really
140
     must be left untouched (they are required to make it safely across
141
     partition boundaries).  See the comments at the top of
142
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
143
 
144
  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
145
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
146
    return false;
147
 
148
  /* The conditional branch must target the block after the
149
     unconditional branch.  */
150
  cbranch_dest_block = cbranch_jump_edge->dest;
151
 
152
  if (cbranch_dest_block == EXIT_BLOCK_PTR
153
      || !can_fallthru (jump_block, cbranch_dest_block))
154
    return false;
155
 
156
  /* Invert the conditional branch.  */
157
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
158
    return false;
159
 
160
  if (dump_file)
161
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
162
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
163
 
164
  /* Success.  Update the CFG to match.  Note that after this point
165
     the edge variable names appear backwards; the redirection is done
166
     this way to preserve edge profile data.  */
167
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
168
                                                cbranch_dest_block);
169
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
170
                                                    jump_dest_block);
171
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
172
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
173
  update_br_prob_note (cbranch_block);
174
 
175
  /* Delete the block with the unconditional jump, and clean up the mess.  */
176
  delete_basic_block (jump_block);
177
  tidy_fallthru_edge (cbranch_jump_edge);
178
  update_forwarder_flag (cbranch_block);
179
 
180
  return true;
181
}
182
 
183
/* Attempt to prove that operation is NOOP using CSElib or mark the effect
184
   on register.  Used by jump threading.  */
185
 
186
static bool
187
mark_effect (rtx exp, regset nonequal)
188
{
189
  int regno;
190
  rtx dest;
191
  switch (GET_CODE (exp))
192
    {
193
      /* In case we do clobber the register, mark it as equal, as we know the
194
         value is dead so it don't have to match.  */
195
    case CLOBBER:
196
      if (REG_P (XEXP (exp, 0)))
197
        {
198
          dest = XEXP (exp, 0);
199
          regno = REGNO (dest);
200
          CLEAR_REGNO_REG_SET (nonequal, regno);
201
          if (regno < FIRST_PSEUDO_REGISTER)
202
            {
203
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
204
              while (--n > 0)
205
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
206
            }
207
        }
208
      return false;
209
 
210
    case SET:
211
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
212
        return false;
213
      dest = SET_DEST (exp);
214
      if (dest == pc_rtx)
215
        return false;
216
      if (!REG_P (dest))
217
        return true;
218
      regno = REGNO (dest);
219
      SET_REGNO_REG_SET (nonequal, regno);
220
      if (regno < FIRST_PSEUDO_REGISTER)
221
        {
222
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
223
          while (--n > 0)
224
            SET_REGNO_REG_SET (nonequal, regno + n);
225
        }
226
      return false;
227
 
228
    default:
229
      return false;
230
    }
231
}
232
 
233
/* Return nonzero if X is a register set in regset DATA.
234
   Called via for_each_rtx.  */
235
static int
236
mentions_nonequal_regs (rtx *x, void *data)
237
{
238
  regset nonequal = (regset) data;
239
  if (REG_P (*x))
240
    {
241
      int regno;
242
 
243
      regno = REGNO (*x);
244
      if (REGNO_REG_SET_P (nonequal, regno))
245
        return 1;
246
      if (regno < FIRST_PSEUDO_REGISTER)
247
        {
248
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
249
          while (--n > 0)
250
            if (REGNO_REG_SET_P (nonequal, regno + n))
251
              return 1;
252
        }
253
    }
254
  return 0;
255
}
256
/* Attempt to prove that the basic block B will have no side effects and
257
   always continues in the same edge if reached via E.  Return the edge
258
   if exist, NULL otherwise.  */
259
 
260
static edge
261
thread_jump (int mode, edge e, basic_block b)
262
{
263
  rtx set1, set2, cond1, cond2, insn;
264
  enum rtx_code code1, code2, reversed_code2;
265
  bool reverse1 = false;
266
  unsigned i;
267
  regset nonequal;
268
  bool failed = false;
269
  reg_set_iterator rsi;
270
 
271
  if (b->flags & BB_NONTHREADABLE_BLOCK)
272
    return NULL;
273
 
274
  /* At the moment, we do handle only conditional jumps, but later we may
275
     want to extend this code to tablejumps and others.  */
276
  if (EDGE_COUNT (e->src->succs) != 2)
277
    return NULL;
278
  if (EDGE_COUNT (b->succs) != 2)
279
    {
280
      b->flags |= BB_NONTHREADABLE_BLOCK;
281
      return NULL;
282
    }
283
 
284
  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
285
  if (!any_condjump_p (BB_END (e->src)))
286
    return NULL;
287
 
288
  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
289
    {
290
      b->flags |= BB_NONTHREADABLE_BLOCK;
291
      return NULL;
292
    }
293
 
294
  set1 = pc_set (BB_END (e->src));
295
  set2 = pc_set (BB_END (b));
296
  if (((e->flags & EDGE_FALLTHRU) != 0)
297
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
298
    reverse1 = true;
299
 
300
  cond1 = XEXP (SET_SRC (set1), 0);
301
  cond2 = XEXP (SET_SRC (set2), 0);
302
  if (reverse1)
303
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
304
  else
305
    code1 = GET_CODE (cond1);
306
 
307
  code2 = GET_CODE (cond2);
308
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
309
 
310
  if (!comparison_dominates_p (code1, code2)
311
      && !comparison_dominates_p (code1, reversed_code2))
312
    return NULL;
313
 
314
  /* Ensure that the comparison operators are equivalent.
315
     ??? This is far too pessimistic.  We should allow swapped operands,
316
     different CCmodes, or for example comparisons for interval, that
317
     dominate even when operands are not equivalent.  */
318
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
319
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
320
    return NULL;
321
 
322
  /* Short circuit cases where block B contains some side effects, as we can't
323
     safely bypass it.  */
324
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
325
       insn = NEXT_INSN (insn))
326
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
327
      {
328
        b->flags |= BB_NONTHREADABLE_BLOCK;
329
        return NULL;
330
      }
331
 
332
  cselib_init (false);
333
 
334
  /* First process all values computed in the source basic block.  */
335
  for (insn = NEXT_INSN (BB_HEAD (e->src));
336
       insn != NEXT_INSN (BB_END (e->src));
337
       insn = NEXT_INSN (insn))
338
    if (INSN_P (insn))
339
      cselib_process_insn (insn);
340
 
341
  nonequal = BITMAP_ALLOC (NULL);
342
  CLEAR_REG_SET (nonequal);
343
 
344
  /* Now assume that we've continued by the edge E to B and continue
345
     processing as if it were same basic block.
346
     Our goal is to prove that whole block is an NOOP.  */
347
 
348
  for (insn = NEXT_INSN (BB_HEAD (b));
349
       insn != NEXT_INSN (BB_END (b)) && !failed;
350
       insn = NEXT_INSN (insn))
351
    {
352
      if (INSN_P (insn))
353
        {
354
          rtx pat = PATTERN (insn);
355
 
356
          if (GET_CODE (pat) == PARALLEL)
357
            {
358
              for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
359
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
360
            }
361
          else
362
            failed |= mark_effect (pat, nonequal);
363
        }
364
 
365
      cselib_process_insn (insn);
366
    }
367
 
368
  /* Later we should clear nonequal of dead registers.  So far we don't
369
     have life information in cfg_cleanup.  */
370
  if (failed)
371
    {
372
      b->flags |= BB_NONTHREADABLE_BLOCK;
373
      goto failed_exit;
374
    }
375
 
376
  /* cond2 must not mention any register that is not equal to the
377
     former block.  */
378
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
379
    goto failed_exit;
380
 
381
  /* In case liveness information is available, we need to prove equivalence
382
     only of the live values.  */
383
  if (mode & CLEANUP_UPDATE_LIFE)
384
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);
385
 
386
  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
387
    goto failed_exit;
388
 
389
  BITMAP_FREE (nonequal);
390
  cselib_finish ();
391
  if ((comparison_dominates_p (code1, code2) != 0)
392
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
393
    return BRANCH_EDGE (b);
394
  else
395
    return FALLTHRU_EDGE (b);
396
 
397
failed_exit:
398
  BITMAP_FREE (nonequal);
399
  cselib_finish ();
400
  return NULL;
401
}
402
 
403
/* Attempt to forward edges leaving basic block B.
404
   Return true if successful.  */
405
 
406
static bool
407
try_forward_edges (int mode, basic_block b)
408
{
409
  bool changed = false;
410
  edge_iterator ei;
411
  edge e, *threaded_edges = NULL;
412
 
413
  /* If we are partitioning hot/cold basic blocks, we don't want to
414
     mess up unconditional or indirect jumps that cross between hot
415
     and cold sections.
416
 
417
     Basic block partitioning may result in some jumps that appear to
418
     be optimizable (or blocks that appear to be mergeable), but which really m
419
     ust be left untouched (they are required to make it safely across
420
     partition boundaries).  See the comments at the top of
421
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
422
 
423
  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
424
    return false;
425
 
426
  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
427
    {
428
      basic_block target, first;
429
      int counter;
430
      bool threaded = false;
431
      int nthreaded_edges = 0;
432
      bool may_thread = first_pass | (b->flags & BB_DIRTY);
433
 
434
      /* Skip complex edges because we don't know how to update them.
435
 
436
         Still handle fallthru edges, as we can succeed to forward fallthru
437
         edge to the same place as the branch edge of conditional branch
438
         and turn conditional branch to an unconditional branch.  */
439
      if (e->flags & EDGE_COMPLEX)
440
        {
441
          ei_next (&ei);
442
          continue;
443
        }
444
 
445
      target = first = e->dest;
446
      counter = NUM_FIXED_BLOCKS;
447
 
448
      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
449
         up jumps that cross between hot/cold sections.
450
 
451
         Basic block partitioning may result in some jumps that appear
452
         to be optimizable (or blocks that appear to be mergeable), but which
453
         really must be left untouched (they are required to make it safely
454
         across partition boundaries).  See the comments at the top of
455
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
456
         details.  */
457
 
458
      if (first != EXIT_BLOCK_PTR
459
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
460
        return false;
461
 
462
      while (counter < n_basic_blocks)
463
        {
464
          basic_block new_target = NULL;
465
          bool new_target_threaded = false;
466
          may_thread |= target->flags & BB_DIRTY;
467
 
468
          if (FORWARDER_BLOCK_P (target)
469
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
470
              && single_succ (target) != EXIT_BLOCK_PTR)
471
            {
472
              /* Bypass trivial infinite loops.  */
473
              new_target = single_succ (target);
474
              if (target == new_target)
475
                counter = n_basic_blocks;
476
            }
477
 
478
          /* Allow to thread only over one edge at time to simplify updating
479
             of probabilities.  */
480
          else if ((mode & CLEANUP_THREADING) && may_thread)
481
            {
482
              edge t = thread_jump (mode, e, target);
483
              if (t)
484
                {
485
                  if (!threaded_edges)
486
                    threaded_edges = XNEWVEC (edge, n_basic_blocks);
487
                  else
488
                    {
489
                      int i;
490
 
491
                      /* Detect an infinite loop across blocks not
492
                         including the start block.  */
493
                      for (i = 0; i < nthreaded_edges; ++i)
494
                        if (threaded_edges[i] == t)
495
                          break;
496
                      if (i < nthreaded_edges)
497
                        {
498
                          counter = n_basic_blocks;
499
                          break;
500
                        }
501
                    }
502
 
503
                  /* Detect an infinite loop across the start block.  */
504
                  if (t->dest == b)
505
                    break;
506
 
507
                  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
508
                  threaded_edges[nthreaded_edges++] = t;
509
 
510
                  new_target = t->dest;
511
                  new_target_threaded = true;
512
                }
513
            }
514
 
515
          if (!new_target)
516
            break;
517
 
518
          counter++;
519
          target = new_target;
520
          threaded |= new_target_threaded;
521
        }
522
 
523
      if (counter >= n_basic_blocks)
524
        {
525
          if (dump_file)
526
            fprintf (dump_file, "Infinite loop in BB %i.\n",
527
                     target->index);
528
        }
529
      else if (target == first)
530
        ; /* We didn't do anything.  */
531
      else
532
        {
533
          /* Save the values now, as the edge may get removed.  */
534
          gcov_type edge_count = e->count;
535
          int edge_probability = e->probability;
536
          int edge_frequency;
537
          int n = 0;
538
 
539
          /* Don't force if target is exit block.  */
540
          if (threaded && target != EXIT_BLOCK_PTR)
541
            {
542
              notice_new_block (redirect_edge_and_branch_force (e, target));
543
              if (dump_file)
544
                fprintf (dump_file, "Conditionals threaded.\n");
545
            }
546
          else if (!redirect_edge_and_branch (e, target))
547
            {
548
              if (dump_file)
549
                fprintf (dump_file,
550
                         "Forwarding edge %i->%i to %i failed.\n",
551
                         b->index, e->dest->index, target->index);
552
              ei_next (&ei);
553
              continue;
554
            }
555
 
556
          /* We successfully forwarded the edge.  Now update profile
557
             data: for each edge we traversed in the chain, remove
558
             the original edge's execution count.  */
559
          edge_frequency = ((edge_probability * b->frequency
560
                             + REG_BR_PROB_BASE / 2)
561
                            / REG_BR_PROB_BASE);
562
 
563
          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
564
            b->flags |= BB_FORWARDER_BLOCK;
565
 
566
          do
567
            {
568
              edge t;
569
 
570
              if (!single_succ_p (first))
571
                {
572
                  gcc_assert (n < nthreaded_edges);
573
                  t = threaded_edges [n++];
574
                  gcc_assert (t->src == first);
575
                  update_bb_profile_for_threading (first, edge_frequency,
576
                                                   edge_count, t);
577
                  update_br_prob_note (first);
578
                }
579
              else
580
                {
581
                  first->count -= edge_count;
582
                  if (first->count < 0)
583
                    first->count = 0;
584
                  first->frequency -= edge_frequency;
585
                  if (first->frequency < 0)
586
                    first->frequency = 0;
587
                  /* It is possible that as the result of
588
                     threading we've removed edge as it is
589
                     threaded to the fallthru edge.  Avoid
590
                     getting out of sync.  */
591
                  if (n < nthreaded_edges
592
                      && first == threaded_edges [n]->src)
593
                    n++;
594
                  t = single_succ_edge (first);
595
                }
596
 
597
              t->count -= edge_count;
598
              if (t->count < 0)
599
                t->count = 0;
600
              first = t->dest;
601
            }
602
          while (first != target);
603
 
604
          changed = true;
605
          continue;
606
        }
607
      ei_next (&ei);
608
    }
609
 
610
  if (threaded_edges)
611
    free (threaded_edges);
612
  return changed;
613
}
614
 
615
 
616
/* Blocks A and B are to be merged into a single block.  A has no incoming
617
   fallthru edge, so it can be moved before B without adding or modifying
618
   any jumps (aside from the jump from A to B).  */
619
 
620
static void
621
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
622
{
623
  rtx barrier;
624
  bool only_notes;
625
 
626
  /* If we are partitioning hot/cold basic blocks, we don't want to
627
     mess up unconditional or indirect jumps that cross between hot
628
     and cold sections.
629
 
630
     Basic block partitioning may result in some jumps that appear to
631
     be optimizable (or blocks that appear to be mergeable), but which really
632
     must be left untouched (they are required to make it safely across
633
     partition boundaries).  See the comments at the top of
634
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
635
 
636
  if (BB_PARTITION (a) != BB_PARTITION (b))
637
    return;
638
 
639
  barrier = next_nonnote_insn (BB_END (a));
640
  gcc_assert (BARRIER_P (barrier));
641
  delete_insn (barrier);
642
 
643
  /* Move block and loop notes out of the chain so that we do not
644
     disturb their order.
645
 
646
     ??? A better solution would be to squeeze out all the non-nested notes
647
     and adjust the block trees appropriately.   Even better would be to have
648
     a tighter connection between block trees and rtl so that this is not
649
     necessary.  */
650
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
651
  gcc_assert (!only_notes);
652
 
653
  /* Scramble the insn chain.  */
654
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
655
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
656
  a->flags |= BB_DIRTY;
657
 
658
  if (dump_file)
659
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
660
             a->index, b->index);
661
 
662
  /* Swap the records for the two blocks around.  */
663
 
664
  unlink_block (a);
665
  link_block (a, b->prev_bb);
666
 
667
  /* Now blocks A and B are contiguous.  Merge them.  */
668
  merge_blocks (a, b);
669
}
670
 
671
/* Blocks A and B are to be merged into a single block.  B has no outgoing
672
   fallthru edge, so it can be moved after A without adding or modifying
673
   any jumps (aside from the jump from A to B).  */
674
 
675
static void
676
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
677
{
678
  rtx barrier, real_b_end;
679
  rtx label, table;
680
  bool only_notes;
681
 
682
  /* If we are partitioning hot/cold basic blocks, we don't want to
683
     mess up unconditional or indirect jumps that cross between hot
684
     and cold sections.
685
 
686
     Basic block partitioning may result in some jumps that appear to
687
     be optimizable (or blocks that appear to be mergeable), but which really
688
     must be left untouched (they are required to make it safely across
689
     partition boundaries).  See the comments at the top of
690
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
691
 
692
  if (BB_PARTITION (a) != BB_PARTITION (b))
693
    return;
694
 
695
  real_b_end = BB_END (b);
696
 
697
  /* If there is a jump table following block B temporarily add the jump table
698
     to block B so that it will also be moved to the correct location.  */
699
  if (tablejump_p (BB_END (b), &label, &table)
700
      && prev_active_insn (label) == BB_END (b))
701
    {
702
      BB_END (b) = table;
703
    }
704
 
705
  /* There had better have been a barrier there.  Delete it.  */
706
  barrier = NEXT_INSN (BB_END (b));
707
  if (barrier && BARRIER_P (barrier))
708
    delete_insn (barrier);
709
 
710
  /* Move block and loop notes out of the chain so that we do not
711
     disturb their order.
712
 
713
     ??? A better solution would be to squeeze out all the non-nested notes
714
     and adjust the block trees appropriately.   Even better would be to have
715
     a tighter connection between block trees and rtl so that this is not
716
     necessary.  */
717
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
718
  gcc_assert (!only_notes);
719
 
720
 
721
  /* Scramble the insn chain.  */
722
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
723
 
724
  /* Restore the real end of b.  */
725
  BB_END (b) = real_b_end;
726
 
727
  if (dump_file)
728
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
729
             b->index, a->index);
730
 
731
  /* Now blocks A and B are contiguous.  Merge them.  */
732
  merge_blocks (a, b);
733
}
734
 
735
/* Attempt to merge basic blocks that are potentially non-adjacent.
736
   Return NULL iff the attempt failed, otherwise return basic block
737
   where cleanup_cfg should continue.  Because the merging commonly
738
   moves basic block away or introduces another optimization
739
   possibility, return basic block just before B so cleanup_cfg don't
740
   need to iterate.
741
 
742
   It may be good idea to return basic block before C in the case
743
   C has been moved after B and originally appeared earlier in the
744
   insn sequence, but we have no information available about the
745
   relative ordering of these two.  Hopefully it is not too common.  */
746
 
747
static basic_block
748
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
749
{
750
  basic_block next;
751
 
752
  /* If we are partitioning hot/cold basic blocks, we don't want to
753
     mess up unconditional or indirect jumps that cross between hot
754
     and cold sections.
755
 
756
     Basic block partitioning may result in some jumps that appear to
757
     be optimizable (or blocks that appear to be mergeable), but which really
758
     must be left untouched (they are required to make it safely across
759
     partition boundaries).  See the comments at the top of
760
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
761
 
762
  if (BB_PARTITION (b) != BB_PARTITION (c))
763
    return NULL;
764
 
765
 
766
 
767
  /* If B has a fallthru edge to C, no need to move anything.  */
768
  if (e->flags & EDGE_FALLTHRU)
769
    {
770
      int b_index = b->index, c_index = c->index;
771
      merge_blocks (b, c);
772
      update_forwarder_flag (b);
773
 
774
      if (dump_file)
775
        fprintf (dump_file, "Merged %d and %d without moving.\n",
776
                 b_index, c_index);
777
 
778
      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
779
    }
780
 
781
  /* Otherwise we will need to move code around.  Do that only if expensive
782
     transformations are allowed.  */
783
  else if (mode & CLEANUP_EXPENSIVE)
784
    {
785
      edge tmp_edge, b_fallthru_edge;
786
      bool c_has_outgoing_fallthru;
787
      bool b_has_incoming_fallthru;
788
      edge_iterator ei;
789
 
790
      /* Avoid overactive code motion, as the forwarder blocks should be
791
         eliminated by edge redirection instead.  One exception might have
792
         been if B is a forwarder block and C has no fallthru edge, but
793
         that should be cleaned up by bb-reorder instead.  */
794
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
795
        return NULL;
796
 
797
      /* We must make sure to not munge nesting of lexical blocks,
798
         and loop notes.  This is done by squeezing out all the notes
799
         and leaving them there to lie.  Not ideal, but functional.  */
800
 
801
      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
802
        if (tmp_edge->flags & EDGE_FALLTHRU)
803
          break;
804
 
805
      c_has_outgoing_fallthru = (tmp_edge != NULL);
806
 
807
      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
808
        if (tmp_edge->flags & EDGE_FALLTHRU)
809
          break;
810
 
811
      b_has_incoming_fallthru = (tmp_edge != NULL);
812
      b_fallthru_edge = tmp_edge;
813
      next = b->prev_bb;
814
      if (next == c)
815
        next = next->prev_bb;
816
 
817
      /* Otherwise, we're going to try to move C after B.  If C does
818
         not have an outgoing fallthru, then it can be moved
819
         immediately after B without introducing or modifying jumps.  */
820
      if (! c_has_outgoing_fallthru)
821
        {
822
          merge_blocks_move_successor_nojumps (b, c);
823
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
824
        }
825
 
826
      /* If B does not have an incoming fallthru, then it can be moved
827
         immediately before C without introducing or modifying jumps.
828
         C cannot be the first block, so we do not have to worry about
829
         accessing a non-existent block.  */
830
 
831
      if (b_has_incoming_fallthru)
832
        {
833
          basic_block bb;
834
 
835
          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
836
            return NULL;
837
          bb = force_nonfallthru (b_fallthru_edge);
838
          if (bb)
839
            notice_new_block (bb);
840
        }
841
 
842
      merge_blocks_move_predecessor_nojumps (b, c);
843
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
844
    }
845
 
846
  return NULL;
847
}
848
 
849
 
850
/* Removes the memory attributes of MEM expression
851
   if they are not equal.  */
852
 
853
void
854
merge_memattrs (rtx x, rtx y)
855
{
856
  int i;
857
  int j;
858
  enum rtx_code code;
859
  const char *fmt;
860
 
861
  if (x == y)
862
    return;
863
  if (x == 0 || y == 0)
864
    return;
865
 
866
  code = GET_CODE (x);
867
 
868
  if (code != GET_CODE (y))
869
    return;
870
 
871
  if (GET_MODE (x) != GET_MODE (y))
872
    return;
873
 
874
  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
875
    {
876
      if (! MEM_ATTRS (x))
877
        MEM_ATTRS (y) = 0;
878
      else if (! MEM_ATTRS (y))
879
        MEM_ATTRS (x) = 0;
880
      else
881
        {
882
          rtx mem_size;
883
 
884
          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
885
            {
886
              set_mem_alias_set (x, 0);
887
              set_mem_alias_set (y, 0);
888
            }
889
 
890
          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
891
            {
892
              set_mem_expr (x, 0);
893
              set_mem_expr (y, 0);
894
              set_mem_offset (x, 0);
895
              set_mem_offset (y, 0);
896
            }
897
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
898
            {
899
              set_mem_offset (x, 0);
900
              set_mem_offset (y, 0);
901
            }
902
 
903
          if (!MEM_SIZE (x))
904
            mem_size = NULL_RTX;
905
          else if (!MEM_SIZE (y))
906
            mem_size = NULL_RTX;
907
          else
908
            mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
909
                                     INTVAL (MEM_SIZE (y))));
910
          set_mem_size (x, mem_size);
911
          set_mem_size (y, mem_size);
912
 
913
          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
914
          set_mem_align (y, MEM_ALIGN (x));
915
        }
916
    }
917
 
918
  fmt = GET_RTX_FORMAT (code);
919
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
920
    {
921
      switch (fmt[i])
922
        {
923
        case 'E':
924
          /* Two vectors must have the same length.  */
925
          if (XVECLEN (x, i) != XVECLEN (y, i))
926
            return;
927
 
928
          for (j = 0; j < XVECLEN (x, i); j++)
929
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
930
 
931
          break;
932
 
933
        case 'e':
934
          merge_memattrs (XEXP (x, i), XEXP (y, i));
935
        }
936
    }
937
  return;
938
}
939
 
940
 
941
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */
942
 
943
static bool
944
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
945
{
946
  rtx p1, p2;
947
 
948
  /* Verify that I1 and I2 are equivalent.  */
949
  if (GET_CODE (i1) != GET_CODE (i2))
950
    return false;
951
 
952
  p1 = PATTERN (i1);
953
  p2 = PATTERN (i2);
954
 
955
  if (GET_CODE (p1) != GET_CODE (p2))
956
    return false;
957
 
958
  /* If this is a CALL_INSN, compare register usage information.
959
     If we don't check this on stack register machines, the two
960
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
961
     numbers of stack registers in the same basic block.
962
     If we don't check this on machines with delay slots, a delay slot may
963
     be filled that clobbers a parameter expected by the subroutine.
964
 
965
     ??? We take the simple route for now and assume that if they're
966
     equal, they were constructed identically.  */
967
 
968
  if (CALL_P (i1)
969
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
970
                        CALL_INSN_FUNCTION_USAGE (i2))
971
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
972
    return false;
973
 
974
#ifdef STACK_REGS
975
  /* If cross_jump_death_matters is not 0, the insn's mode
976
     indicates whether or not the insn contains any stack-like
977
     regs.  */
978
 
979
  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
980
    {
981
      /* If register stack conversion has already been done, then
982
         death notes must also be compared before it is certain that
983
         the two instruction streams match.  */
984
 
985
      rtx note;
986
      HARD_REG_SET i1_regset, i2_regset;
987
 
988
      CLEAR_HARD_REG_SET (i1_regset);
989
      CLEAR_HARD_REG_SET (i2_regset);
990
 
991
      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
992
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
993
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
994
 
995
      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
996
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
997
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
998
 
999
      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1000
 
1001
      return false;
1002
 
1003
    done:
1004
      ;
1005
    }
1006
#endif
1007
 
1008
  if (reload_completed
1009
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1010
    return true;
1011
 
1012
  /* Do not do EQUIV substitution after reload.  First, we're undoing the
1013
     work of reload_cse.  Second, we may be undoing the work of the post-
1014
     reload splitting pass.  */
1015
  /* ??? Possibly add a new phase switch variable that can be used by
1016
     targets to disallow the troublesome insns after splitting.  */
1017
  if (!reload_completed)
1018
    {
1019
      /* The following code helps take care of G++ cleanups.  */
1020
      rtx equiv1 = find_reg_equal_equiv_note (i1);
1021
      rtx equiv2 = find_reg_equal_equiv_note (i2);
1022
 
1023
      if (equiv1 && equiv2
1024
          /* If the equivalences are not to a constant, they may
1025
             reference pseudos that no longer exist, so we can't
1026
             use them.  */
1027
          && (! reload_completed
1028
              || (CONSTANT_P (XEXP (equiv1, 0))
1029
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1030
        {
1031
          rtx s1 = single_set (i1);
1032
          rtx s2 = single_set (i2);
1033
          if (s1 != 0 && s2 != 0
1034
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1035
            {
1036
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1037
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1038
              if (! rtx_renumbered_equal_p (p1, p2))
1039
                cancel_changes (0);
1040
              else if (apply_change_group ())
1041
                return true;
1042
            }
1043
        }
1044
    }
1045
 
1046
  return false;
1047
}
1048
 
1049
/* Look through the insns at the end of BB1 and BB2 and find the longest
1050
   sequence that are equivalent.  Store the first insns for that sequence
1051
   in *F1 and *F2 and return the sequence length.
1052
 
1053
   To simplify callers of this function, if the blocks match exactly,
1054
   store the head of the blocks in *F1 and *F2.  */
1055
 
1056
static int
1057
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1058
                      basic_block bb2, rtx *f1, rtx *f2)
1059
{
1060
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
1061
  int ninsns = 0;
1062
 
1063
  /* Skip simple jumps at the end of the blocks.  Complex jumps still
1064
     need to be compared for equivalence, which we'll do below.  */
1065
 
1066
  i1 = BB_END (bb1);
1067
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1068
  if (onlyjump_p (i1)
1069
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1070
    {
1071
      last1 = i1;
1072
      i1 = PREV_INSN (i1);
1073
    }
1074
 
1075
  i2 = BB_END (bb2);
1076
  if (onlyjump_p (i2)
1077
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1078
    {
1079
      last2 = i2;
1080
      /* Count everything except for unconditional jump as insn.  */
1081
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1082
        ninsns++;
1083
      i2 = PREV_INSN (i2);
1084
    }
1085
 
1086
  while (true)
1087
    {
1088
      /* Ignore notes.  */
1089
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1090
        i1 = PREV_INSN (i1);
1091
 
1092
      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1093
        i2 = PREV_INSN (i2);
1094
 
1095
      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1096
        break;
1097
 
1098
      if (!old_insns_match_p (mode, i1, i2))
1099
        break;
1100
 
1101
      merge_memattrs (i1, i2);
1102
 
1103
      /* Don't begin a cross-jump with a NOTE insn.  */
1104
      if (INSN_P (i1))
1105
        {
1106
          /* If the merged insns have different REG_EQUAL notes, then
1107
             remove them.  */
1108
          rtx equiv1 = find_reg_equal_equiv_note (i1);
1109
          rtx equiv2 = find_reg_equal_equiv_note (i2);
1110
 
1111
          if (equiv1 && !equiv2)
1112
            remove_note (i1, equiv1);
1113
          else if (!equiv1 && equiv2)
1114
            remove_note (i2, equiv2);
1115
          else if (equiv1 && equiv2
1116
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1117
            {
1118
              remove_note (i1, equiv1);
1119
              remove_note (i2, equiv2);
1120
            }
1121
 
1122
          afterlast1 = last1, afterlast2 = last2;
1123
          last1 = i1, last2 = i2;
1124
          ninsns++;
1125
        }
1126
 
1127
      i1 = PREV_INSN (i1);
1128
      i2 = PREV_INSN (i2);
1129
    }
1130
 
1131
#ifdef HAVE_cc0
1132
  /* Don't allow the insn after a compare to be shared by
1133
     cross-jumping unless the compare is also shared.  */
1134
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1135
    last1 = afterlast1, last2 = afterlast2, ninsns--;
1136
#endif
1137
 
1138
  /* Include preceding notes and labels in the cross-jump.  One,
1139
     this may bring us to the head of the blocks as requested above.
1140
     Two, it keeps line number notes as matched as may be.  */
1141
  if (ninsns)
1142
    {
1143
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1144
        last1 = PREV_INSN (last1);
1145
 
1146
      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1147
        last1 = PREV_INSN (last1);
1148
 
1149
      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1150
        last2 = PREV_INSN (last2);
1151
 
1152
      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1153
        last2 = PREV_INSN (last2);
1154
 
1155
      *f1 = last1;
1156
      *f2 = last2;
1157
    }
1158
 
1159
  return ninsns;
1160
}
1161
 
1162
/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
1163
bool
1164
condjump_equiv_p (struct equiv_info *info, bool call_init)
1165
{
1166
  basic_block bb1 = info->x_block;
1167
  basic_block bb2 = info->y_block;
1168
  edge b1 = BRANCH_EDGE (bb1);
1169
  edge b2 = BRANCH_EDGE (bb2);
1170
  edge f1 = FALLTHRU_EDGE (bb1);
1171
  edge f2 = FALLTHRU_EDGE (bb2);
1172
  bool reverse, match;
1173
  rtx set1, set2, cond1, cond2;
1174
  rtx src1, src2;
1175
  enum rtx_code code1, code2;
1176
 
1177
  /* Get around possible forwarders on fallthru edges.  Other cases
1178
     should be optimized out already.  */
1179
  if (FORWARDER_BLOCK_P (f1->dest))
1180
    f1 = single_succ_edge (f1->dest);
1181
 
1182
  if (FORWARDER_BLOCK_P (f2->dest))
1183
    f2 = single_succ_edge (f2->dest);
1184
 
1185
  /* To simplify use of this function, return false if there are
1186
     unneeded forwarder blocks.  These will get eliminated later
1187
     during cleanup_cfg.  */
1188
  if (FORWARDER_BLOCK_P (f1->dest)
1189
      || FORWARDER_BLOCK_P (f2->dest)
1190
      || FORWARDER_BLOCK_P (b1->dest)
1191
      || FORWARDER_BLOCK_P (b2->dest))
1192
    return false;
1193
 
1194
  if (f1->dest == f2->dest && b1->dest == b2->dest)
1195
    reverse = false;
1196
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
1197
    reverse = true;
1198
  else
1199
    return false;
1200
 
1201
  set1 = pc_set (BB_END (bb1));
1202
  set2 = pc_set (BB_END (bb2));
1203
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1204
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1205
    reverse = !reverse;
1206
 
1207
  src1 = SET_SRC (set1);
1208
  src2 = SET_SRC (set2);
1209
  cond1 = XEXP (src1, 0);
1210
  cond2 = XEXP (src2, 0);
1211
  code1 = GET_CODE (cond1);
1212
  if (reverse)
1213
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
1214
  else
1215
    code2 = GET_CODE (cond2);
1216
 
1217
  if (code2 == UNKNOWN)
1218
    return false;
1219
 
1220
  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
1221
    gcc_unreachable ();
1222
  /* Make the sources of the pc sets unreadable so that when we call
1223
     insns_match_p it won't process them.
1224
     The death_notes_match_p from insns_match_p won't see the local registers
1225
     used for the pc set, but that could only cause missed optimizations when
1226
     there are actually condjumps that use stack registers.  */
1227
  SET_SRC (set1) = pc_rtx;
1228
  SET_SRC (set2) = pc_rtx;
1229
  /* Verify codes and operands match.  */
1230
  if (code1 == code2)
1231
    {
1232
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1233
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
1234
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
1235
 
1236
    }
1237
  else if (code1 == swap_condition (code2))
1238
    {
1239
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1240
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
1241
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
1242
 
1243
    }
1244
  else
1245
    match = false;
1246
  SET_SRC (set1) = src1;
1247
  SET_SRC (set2) = src2;
1248
  match &= verify_changes (0);
1249
 
1250
  /* If we return true, we will join the blocks.  Which means that
1251
     we will only have one branch prediction bit to work with.  Thus
1252
     we require the existing branches to have probabilities that are
1253
     roughly similar.  */
1254
  if (match
1255
      && !optimize_size
1256
      && maybe_hot_bb_p (bb1)
1257
      && maybe_hot_bb_p (bb2))
1258
    {
1259
      int prob2;
1260
 
1261
      if (b1->dest == b2->dest)
1262
        prob2 = b2->probability;
1263
      else
1264
        /* Do not use f2 probability as f2 may be forwarded.  */
1265
        prob2 = REG_BR_PROB_BASE - b2->probability;
1266
 
1267
      /* Fail if the difference in probabilities is greater than 50%.
1268
         This rules out two well-predicted branches with opposite
1269
         outcomes.  */
1270
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1271
        {
1272
          if (dump_file)
1273
            fprintf (dump_file,
1274
                     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1275
                     bb1->index, bb2->index, b1->probability, prob2);
1276
 
1277
          match = false;
1278
        }
1279
    }
1280
 
1281
  if (dump_file && match)
1282
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1283
             bb1->index, bb2->index);
1284
 
1285
  if (!match)
1286
    cancel_changes (0);
1287
  return match;
1288
}
1289
 
1290
/* Return true iff outgoing edges of BB1 and BB2 match, together with
1291
   the branch instruction.  This means that if we commonize the control
1292
   flow before end of the basic block, the semantic remains unchanged.
1293
 
1294
   We may assume that there exists one edge with a common destination.  */
1295
 
1296
static bool
1297
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1298
{
1299
  int nehedges1 = 0, nehedges2 = 0;
1300
  edge fallthru1 = 0, fallthru2 = 0;
1301
  edge e1, e2;
1302
  edge_iterator ei;
1303
 
1304
  /* If BB1 has only one successor, we may be looking at either an
1305
     unconditional jump, or a fake edge to exit.  */
1306
  if (single_succ_p (bb1)
1307
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1308
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1309
    return (single_succ_p (bb2)
1310
            && (single_succ_edge (bb2)->flags
1311
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1312
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1313
 
1314
  /* Match conditional jumps - this may get tricky when fallthru and branch
1315
     edges are crossed.  */
1316
  if (EDGE_COUNT (bb1->succs) == 2
1317
      && any_condjump_p (BB_END (bb1))
1318
      && onlyjump_p (BB_END (bb1)))
1319
    {
1320
      edge b1, f1, b2, f2;
1321
      bool reverse, match;
1322
      rtx set1, set2, cond1, cond2;
1323
      enum rtx_code code1, code2;
1324
 
1325
      if (EDGE_COUNT (bb2->succs) != 2
1326
          || !any_condjump_p (BB_END (bb2))
1327
          || !onlyjump_p (BB_END (bb2)))
1328
        return false;
1329
 
1330
      b1 = BRANCH_EDGE (bb1);
1331
      b2 = BRANCH_EDGE (bb2);
1332
      f1 = FALLTHRU_EDGE (bb1);
1333
      f2 = FALLTHRU_EDGE (bb2);
1334
 
1335
      /* Get around possible forwarders on fallthru edges.  Other cases
1336
         should be optimized out already.  */
1337
      if (FORWARDER_BLOCK_P (f1->dest))
1338
        f1 = single_succ_edge (f1->dest);
1339
 
1340
      if (FORWARDER_BLOCK_P (f2->dest))
1341
        f2 = single_succ_edge (f2->dest);
1342
 
1343
      /* To simplify use of this function, return false if there are
1344
         unneeded forwarder blocks.  These will get eliminated later
1345
         during cleanup_cfg.  */
1346
      if (FORWARDER_BLOCK_P (f1->dest)
1347
          || FORWARDER_BLOCK_P (f2->dest)
1348
          || FORWARDER_BLOCK_P (b1->dest)
1349
          || FORWARDER_BLOCK_P (b2->dest))
1350
        return false;
1351
 
1352
      if (f1->dest == f2->dest && b1->dest == b2->dest)
1353
        reverse = false;
1354
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
1355
        reverse = true;
1356
      else
1357
        return false;
1358
 
1359
      set1 = pc_set (BB_END (bb1));
1360
      set2 = pc_set (BB_END (bb2));
1361
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1362
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1363
        reverse = !reverse;
1364
 
1365
      cond1 = XEXP (SET_SRC (set1), 0);
1366
      cond2 = XEXP (SET_SRC (set2), 0);
1367
      code1 = GET_CODE (cond1);
1368
      if (reverse)
1369
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
1370
      else
1371
        code2 = GET_CODE (cond2);
1372
 
1373
      if (code2 == UNKNOWN)
1374
        return false;
1375
 
1376
      /* Verify codes and operands match.  */
1377
      match = ((code1 == code2
1378
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1379
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1380
               || (code1 == swap_condition (code2)
1381
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1382
                                              XEXP (cond2, 0))
1383
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1384
                                              XEXP (cond2, 1))));
1385
 
1386
      /* If we return true, we will join the blocks.  Which means that
1387
         we will only have one branch prediction bit to work with.  Thus
1388
         we require the existing branches to have probabilities that are
1389
         roughly similar.  */
1390
      if (match
1391
          && !optimize_size
1392
          && maybe_hot_bb_p (bb1)
1393
          && maybe_hot_bb_p (bb2))
1394
        {
1395
          int prob2;
1396
 
1397
          if (b1->dest == b2->dest)
1398
            prob2 = b2->probability;
1399
          else
1400
            /* Do not use f2 probability as f2 may be forwarded.  */
1401
            prob2 = REG_BR_PROB_BASE - b2->probability;
1402
 
1403
          /* Fail if the difference in probabilities is greater than 50%.
1404
             This rules out two well-predicted branches with opposite
1405
             outcomes.  */
1406
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1407
            {
1408
              if (dump_file)
1409
                fprintf (dump_file,
1410
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1411
                         bb1->index, bb2->index, b1->probability, prob2);
1412
 
1413
              return false;
1414
            }
1415
        }
1416
 
1417
      if (dump_file && match)
1418
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1419
                 bb1->index, bb2->index);
1420
 
1421
      return match;
1422
    }
1423
 
1424
  /* Generic case - we are seeing a computed jump, table jump or trapping
1425
     instruction.  */
1426
 
1427
  /* Check whether there are tablejumps in the end of BB1 and BB2.
1428
     Return true if they are identical.  */
1429
    {
1430
      rtx label1, label2;
1431
      rtx table1, table2;
1432
 
1433
      if (tablejump_p (BB_END (bb1), &label1, &table1)
1434
          && tablejump_p (BB_END (bb2), &label2, &table2)
1435
          && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1436
        {
1437
          /* The labels should never be the same rtx.  If they really are same
1438
             the jump tables are same too. So disable crossjumping of blocks BB1
1439
             and BB2 because when deleting the common insns in the end of BB1
1440
             by delete_basic_block () the jump table would be deleted too.  */
1441
          /* If LABEL2 is referenced in BB1->END do not do anything
1442
             because we would loose information when replacing
1443
             LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1444
          if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1445
            {
1446
              /* Set IDENTICAL to true when the tables are identical.  */
1447
              bool identical = false;
1448
              rtx p1, p2;
1449
 
1450
              p1 = PATTERN (table1);
1451
              p2 = PATTERN (table2);
1452
              if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1453
                {
1454
                  identical = true;
1455
                }
1456
              else if (GET_CODE (p1) == ADDR_DIFF_VEC
1457
                       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1458
                       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1459
                       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1460
                {
1461
                  int i;
1462
 
1463
                  identical = true;
1464
                  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1465
                    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1466
                      identical = false;
1467
                }
1468
 
1469
              if (identical)
1470
                {
1471
                  replace_label_data rr;
1472
                  bool match;
1473
 
1474
                  /* Temporarily replace references to LABEL1 with LABEL2
1475
                     in BB1->END so that we could compare the instructions.  */
1476
                  rr.r1 = label1;
1477
                  rr.r2 = label2;
1478
                  rr.update_label_nuses = false;
1479
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1480
 
1481
                  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1482
                  if (dump_file && match)
1483
                    fprintf (dump_file,
1484
                             "Tablejumps in bb %i and %i match.\n",
1485
                             bb1->index, bb2->index);
1486
 
1487
                  /* Set the original label in BB1->END because when deleting
1488
                     a block whose end is a tablejump, the tablejump referenced
1489
                     from the instruction is deleted too.  */
1490
                  rr.r1 = label2;
1491
                  rr.r2 = label1;
1492
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1493
 
1494
                  return match;
1495
                }
1496
            }
1497
          return false;
1498
        }
1499
    }
1500
 
1501
  /* First ensure that the instructions match.  There may be many outgoing
1502
     edges so this test is generally cheaper.  */
1503
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1504
    return false;
1505
 
1506
  /* Search the outgoing edges, ensure that the counts do match, find possible
1507
     fallthru and exception handling edges since these needs more
1508
     validation.  */
1509
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1510
    return false;
1511
 
1512
  FOR_EACH_EDGE (e1, ei, bb1->succs)
1513
    {
1514
      e2 = EDGE_SUCC (bb2, ei.index);
1515
 
1516
      if (e1->flags & EDGE_EH)
1517
        nehedges1++;
1518
 
1519
      if (e2->flags & EDGE_EH)
1520
        nehedges2++;
1521
 
1522
      if (e1->flags & EDGE_FALLTHRU)
1523
        fallthru1 = e1;
1524
      if (e2->flags & EDGE_FALLTHRU)
1525
        fallthru2 = e2;
1526
    }
1527
 
1528
  /* If the number of edges of the various types does not match, fail.  */
1529
  if (nehedges1 != nehedges2
1530
      || (fallthru1 != 0) != (fallthru2 != 0))
1531
    return false;
1532
 
1533
  /* fallthru edges must be forwarded to the same destination.  */
1534
  if (fallthru1)
1535
    {
1536
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
1537
                        ? single_succ (fallthru1->dest): fallthru1->dest);
1538
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
1539
                        ? single_succ (fallthru2->dest): fallthru2->dest);
1540
 
1541
      if (d1 != d2)
1542
        return false;
1543
    }
1544
 
1545
  /* Ensure the same EH region.  */
1546
  {
1547
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1548
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1549
 
1550
    if (!n1 && n2)
1551
      return false;
1552
 
1553
    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1554
      return false;
1555
  }
1556
 
1557
  /* The same checks as in try_crossjump_to_edge.  They are required for the RTL
1558
     version of sequence abstraction.  */
1559
  FOR_EACH_EDGE (e1, ei, bb2->succs)
1560
    {
1561
      edge e2;
1562
      edge_iterator ei;
1563
      basic_block d1 = e1->dest;
1564
 
1565
      if (FORWARDER_BLOCK_P (d1))
1566
        d1 = EDGE_SUCC (d1, 0)->dest;
1567
 
1568
      FOR_EACH_EDGE (e2, ei, bb1->succs)
1569
        {
1570
          basic_block d2 = e2->dest;
1571
          if (FORWARDER_BLOCK_P (d2))
1572
            d2 = EDGE_SUCC (d2, 0)->dest;
1573
          if (d1 == d2)
1574
            break;
1575
        }
1576
 
1577
      if (!e2)
1578
        return false;
1579
    }
1580
 
1581
  return true;
1582
}
1583
 
1584
/* Return true if basic block BB has a preserve label.  */
1585
 
1586
static bool
1587
block_has_preserve_label (basic_block bb)
1588
{
1589
  return (bb
1590
          && block_label (bb)
1591
          && LABEL_PRESERVE_P (block_label (bb)));
1592
}
1593
 
1594
/* E1 and E2 are edges with the same destination block.  Search their
1595
   predecessors for common code.  If found, redirect control flow from
1596
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
1597
 
1598
static bool
1599
try_crossjump_to_edge (int mode, edge e1, edge e2)
1600
{
1601
  int nmatch;
1602
  basic_block src1 = e1->src, src2 = e2->src;
1603
  basic_block redirect_to, redirect_from, to_remove;
1604
  rtx newpos1, newpos2;
1605
  edge s;
1606
  edge_iterator ei;
1607
 
1608
  newpos1 = newpos2 = NULL_RTX;
1609
 
1610
  /* If we have partitioned hot/cold basic blocks, it is a bad idea
1611
     to try this optimization.
1612
 
1613
     Basic block partitioning may result in some jumps that appear to
1614
     be optimizable (or blocks that appear to be mergeable), but which really
1615
     must be left untouched (they are required to make it safely across
1616
     partition boundaries).  See the comments at the top of
1617
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1618
 
1619
  if (flag_reorder_blocks_and_partition && no_new_pseudos)
1620
    return false;
1621
 
1622
  /* Search backward through forwarder blocks.  We don't need to worry
1623
     about multiple entry or chained forwarders, as they will be optimized
1624
     away.  We do this to look past the unconditional jump following a
1625
     conditional jump that is required due to the current CFG shape.  */
1626
  if (single_pred_p (src1)
1627
      && FORWARDER_BLOCK_P (src1))
1628
    e1 = single_pred_edge (src1), src1 = e1->src;
1629
 
1630
  if (single_pred_p (src2)
1631
      && FORWARDER_BLOCK_P (src2))
1632
    e2 = single_pred_edge (src2), src2 = e2->src;
1633
 
1634
  /* Nothing to do if we reach ENTRY, or a common source block.  */
1635
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1636
    return false;
1637
  if (src1 == src2)
1638
    return false;
1639
 
1640
  /* Seeing more than one forwarder block would confuse us later...  */
1641
  if (FORWARDER_BLOCK_P (e1->dest)
1642
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1643
    return false;
1644
 
1645
  if (FORWARDER_BLOCK_P (e2->dest)
1646
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1647
    return false;
1648
 
1649
  /* Likewise with dead code (possibly newly created by the other optimizations
1650
     of cfg_cleanup).  */
1651
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1652
    return false;
1653
 
1654
  /* Look for the common insn sequence, part the first ...  */
1655
  if (!outgoing_edges_match (mode, src1, src2))
1656
    return false;
1657
 
1658
  /* ... and part the second.  */
1659
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1660
 
1661
  /* Don't proceed with the crossjump unless we found a sufficient number
1662
     of matching instructions or the 'from' block was totally matched
1663
     (such that its predecessors will hopefully be redirected and the
1664
     block removed).  */
1665
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1666
      && (newpos1 != BB_HEAD (src1)))
1667
    return false;
1668
 
1669
  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1670
  if (block_has_preserve_label (e1->dest)
1671
      && (e1->flags & EDGE_ABNORMAL))
1672
    return false;
1673
 
1674
  /* Here we know that the insns at the end of SRC1 which are common with SRC2
1675
     will be deleted.
1676
     If we have tablejumps at the end of SRC1 and SRC2
1677
     they have already been compared for equivalence in outgoing_edges_match ()
1678
     so replace the references to TABLE1 by references to TABLE2.  */
1679
    {
1680
      rtx label1, label2;
1681
      rtx table1, table2;
1682
 
1683
      if (tablejump_p (BB_END (src1), &label1, &table1)
1684
          && tablejump_p (BB_END (src2), &label2, &table2)
1685
          && label1 != label2)
1686
        {
1687
          replace_label_data rr;
1688
          rtx insn;
1689
 
1690
          /* Replace references to LABEL1 with LABEL2.  */
1691
          rr.r1 = label1;
1692
          rr.r2 = label2;
1693
          rr.update_label_nuses = true;
1694
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1695
            {
1696
              /* Do not replace the label in SRC1->END because when deleting
1697
                 a block whose end is a tablejump, the tablejump referenced
1698
                 from the instruction is deleted too.  */
1699
              if (insn != BB_END (src1))
1700
                for_each_rtx (&insn, replace_label, &rr);
1701
            }
1702
        }
1703
    }
1704
 
1705
  /* Avoid splitting if possible.  We must always split when SRC2 has
1706
     EH predecessor edges, or we may end up with basic blocks with both
1707
     normal and EH predecessor edges.  */
1708
  if (newpos2 == BB_HEAD (src2)
1709
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1710
    redirect_to = src2;
1711
  else
1712
    {
1713
      if (newpos2 == BB_HEAD (src2))
1714
        {
1715
          /* Skip possible basic block header.  */
1716
          if (LABEL_P (newpos2))
1717
            newpos2 = NEXT_INSN (newpos2);
1718
          if (NOTE_P (newpos2))
1719
            newpos2 = NEXT_INSN (newpos2);
1720
        }
1721
 
1722
      if (dump_file)
1723
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
1724
                 src2->index, nmatch);
1725
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1726
    }
1727
 
1728
  if (dump_file)
1729
    fprintf (dump_file,
1730
             "Cross jumping from bb %i to bb %i; %i common insns\n",
1731
             src1->index, src2->index, nmatch);
1732
 
1733
  redirect_to->count += src1->count;
1734
  redirect_to->frequency += src1->frequency;
1735
  /* We may have some registers visible through the block.  */
1736
  redirect_to->flags |= BB_DIRTY;
1737
 
1738
  /* Recompute the frequencies and counts of outgoing edges.  */
1739
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
1740
    {
1741
      edge s2;
1742
      edge_iterator ei;
1743
      basic_block d = s->dest;
1744
 
1745
      if (FORWARDER_BLOCK_P (d))
1746
        d = single_succ (d);
1747
 
1748
      FOR_EACH_EDGE (s2, ei, src1->succs)
1749
        {
1750
          basic_block d2 = s2->dest;
1751
          if (FORWARDER_BLOCK_P (d2))
1752
            d2 = single_succ (d2);
1753
          if (d == d2)
1754
            break;
1755
        }
1756
 
1757
      s->count += s2->count;
1758
 
1759
      /* Take care to update possible forwarder blocks.  We verified
1760
         that there is no more than one in the chain, so we can't run
1761
         into an infinite loop.  */
1762
      if (FORWARDER_BLOCK_P (s->dest))
1763
        {
1764
          single_succ_edge (s->dest)->count += s2->count;
1765
          s->dest->count += s2->count;
1766
          s->dest->frequency += EDGE_FREQUENCY (s);
1767
        }
1768
 
1769
      if (FORWARDER_BLOCK_P (s2->dest))
1770
        {
1771
          single_succ_edge (s2->dest)->count -= s2->count;
1772
          if (single_succ_edge (s2->dest)->count < 0)
1773
            single_succ_edge (s2->dest)->count = 0;
1774
          s2->dest->count -= s2->count;
1775
          s2->dest->frequency -= EDGE_FREQUENCY (s);
1776
          if (s2->dest->frequency < 0)
1777
            s2->dest->frequency = 0;
1778
          if (s2->dest->count < 0)
1779
            s2->dest->count = 0;
1780
        }
1781
 
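      /* Combine the probabilities of the corresponding outgoing edges as an
         average weighted by the frequencies of REDIRECT_TO and SRC1; fall
         back to a plain average when both frequencies are zero.  */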
1782
      if (!redirect_to->frequency && !src1->frequency)
1783
        s->probability = (s->probability + s2->probability) / 2;
1784
      else
1785
        s->probability
1786
          = ((s->probability * redirect_to->frequency +
1787
              s2->probability * src1->frequency)
1788
             / (redirect_to->frequency + src1->frequency));
1789
    }
1790
 
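  /* Keep the branch probability note on REDIRECT_TO's final jump in sync
     with the edge probabilities recomputed above.  */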
1791
  update_br_prob_note (redirect_to);
1792
 
1793
  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
1794
 
1795
  /* Skip possible basic block header.  */
1796
  if (LABEL_P (newpos1))
1797
    newpos1 = NEXT_INSN (newpos1);
1798
 
1799
  if (NOTE_P (newpos1))
1800
    newpos1 = NEXT_INSN (newpos1);
1801
 
1802
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1803
  to_remove = single_succ (redirect_from);
1804
 
1805
  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1806
  delete_basic_block (to_remove);
1807
 
1808
  update_forwarder_flag (redirect_from);
1809
  if (redirect_to != src2)
1810
    update_forwarder_flag (src2);
1811
 
1812
  return true;
1813
}
1814
 
1815
/* Search the predecessors of BB for common insn sequences.  When found,
1816
   share code between them by redirecting control flow.  Return true if
1817
   any changes were made.  */
1818
 
1819
static bool
1820
try_crossjump_bb (int mode, basic_block bb)
1821
{
1822
  edge e, e2, fallthru;
1823
  bool changed;
1824
  unsigned max, ix, ix2;
1825
  basic_block ev, ev2;
1826
  edge_iterator ei;
1827
 
1828
  /* Nothing to do if there are not at least two incoming edges.  */
1829
  if (EDGE_COUNT (bb->preds) < 2)
1830
    return false;
1831
 
1832
  /* Don't crossjump if this block ends in a computed jump,
1833
     unless we are optimizing for size.  */
1834
  if (!optimize_size
1835
      && bb != EXIT_BLOCK_PTR
1836
      && computed_jump_p (BB_END (bb)))
1837
    return false;
1838
 
1839
  /* If we are partitioning hot/cold basic blocks, we don't want to
1840
     mess up unconditional or indirect jumps that cross between hot
1841
     and cold sections.
1842
 
1843
     Basic block partitioning may result in some jumps that appear to
1844
     be optimizable (or blocks that appear to be mergeable), but which really
1845
     must be left untouched (they are required to make it safely across
1846
     partition boundaries).  See the comments at the top of
1847
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1848
 
1849
  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1850
                                        BB_PARTITION (EDGE_PRED (bb, 1)->src)
1851
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1852
    return false;
1853
 
1854
  /* It is always cheapest to redirect a block that ends in a branch to
1855
     a block that falls through into BB, as that adds no branches to the
1856
     program.  We'll try that combination first.  */
1857
  fallthru = NULL;
1858
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1859
 
1860
  if (EDGE_COUNT (bb->preds) > max)
1861
    return false;
1862
 
1863
  FOR_EACH_EDGE (e, ei, bb->preds)
1864
    {
1865
      if (e->flags & EDGE_FALLTHRU)
1866
        fallthru = e;
1867
    }
1868
 
1869
  changed = false;
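  /* Walk the predecessor edges of BB.  A successful crossjump modifies the
     edge vector, so restart the scan from the first edge whenever one is
     performed.  */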
1870
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1871
    {
1872
      e = EDGE_PRED (ev, ix);
1873
      ix++;
1874
 
1875
      /* As noted above, first try with the fallthru predecessor.  */
1876
      if (fallthru)
1877
        {
1878
          /* Don't combine the fallthru edge into anything else.
1879
             If there is a match, we'll do it the other way around.  */
1880
          if (e == fallthru)
1881
            continue;
1882
          /* If nothing changed since the last attempt, there is nothing
1883
             we can do.  */
1884
          if (!first_pass
1885
              && (!(e->src->flags & BB_DIRTY)
1886
                  && !(fallthru->src->flags & BB_DIRTY)))
1887
            continue;
1888
 
1889
          if (try_crossjump_to_edge (mode, e, fallthru))
1890
            {
1891
              changed = true;
1892
              ix = 0;
1893
              ev = bb;
1894
              continue;
1895
            }
1896
        }
1897
 
1898
      /* Non-obvious work limiting check: Recognize that we're going
1899
         to call try_crossjump_bb on every basic block.  So if we have
1900
         two blocks with lots of outgoing edges (a switch) and they
1901
         share lots of common destinations, then we would do the
1902
         cross-jump check once for each common destination.
1903
 
1904
         Now, if the blocks actually are cross-jump candidates, then
1905
         all of their destinations will be shared.  Which means that
1906
         we only need to check them for cross-jump candidacy once.  We
1907
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
1908
         choosing to do the check from the block for which the edge
1909
         in question is the first successor of A.  */
1910
      if (EDGE_SUCC (e->src, 0) != e)
1911
        continue;
1912
 
1913
      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1914
        {
1915
          e2 = EDGE_PRED (ev2, ix2);
1916
          ix2++;
1917
 
1918
          if (e2 == e)
1919
            continue;
1920
 
1921
          /* We've already checked the fallthru edge above.  */
1922
          if (e2 == fallthru)
1923
            continue;
1924
 
1925
          /* The "first successor" check above only prevents multiple
1926
             checks of crossjump(A,B).  In order to prevent redundant
1927
             checks of crossjump(B,A), require that A be the block
1928
             with the lowest index.  */
1929
          if (e->src->index > e2->src->index)
1930
            continue;
1931
 
1932
          /* If nothing changed since the last attempt, there is nothing
1933
             we can do.  */
1934
          if (!first_pass
1935
              && (!(e->src->flags & BB_DIRTY)
1936
                  && !(e2->src->flags & BB_DIRTY)))
1937
            continue;
1938
 
1939
          if (try_crossjump_to_edge (mode, e, e2))
1940
            {
1941
              changed = true;
1942
              ev2 = bb;
1943
              ix = 0;
1944
              break;
1945
            }
1946
        }
1947
    }
1948
 
1949
  return changed;
1950
}
1951
 
1952
/* Do simple CFG optimizations - basic block merging, simplification of jump
1953
   instructions etc.  Return nonzero if changes were made.  */
1954
 
1955
static bool
1956
try_optimize_cfg (int mode)
1957
{
1958
  bool changed_overall = false;
1959
  bool changed;
1960
  int iterations = 0;
1961
  basic_block bb, b, next;
1962
 
1963
  if (mode & CLEANUP_CROSSJUMP)
1964
    add_noreturn_fake_exit_edges ();
1965
 
1966
  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1967
    clear_bb_flags ();
1968
 
1969
  FOR_EACH_BB (bb)
1970
    update_forwarder_flag (bb);
1971
 
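  /* Everything below rewrites jump instructions, so skip the whole
     optimization loop when the target does not allow jumps to be
     modified at this point.  */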
1972
  if (! targetm.cannot_modify_jumps_p ())
1973
    {
1974
      first_pass = true;
1975
      /* Attempt to merge blocks as made possible by edge removal.  If
1976
         a block has only one successor, and the successor has only
1977
         one predecessor, they may be combined.  */
1978
      do
1979
        {
1980
          changed = false;
1981
          iterations++;
1982
 
1983
          if (dump_file)
1984
            fprintf (dump_file,
1985
                     "\n\ntry_optimize_cfg iteration %i\n\n",
1986
                     iterations);
1987
 
1988
          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1989
            {
1990
              basic_block c;
1991
              edge s;
1992
              bool changed_here = false;
1993
 
1994
              /* Delete trivially dead basic blocks.  */
1995
              while (EDGE_COUNT (b->preds) == 0)
1996
                {
1997
                  c = b->prev_bb;
1998
                  if (dump_file)
1999
                    fprintf (dump_file, "Deleting block %i.\n",
2000
                             b->index);
2001
 
2002
                  delete_basic_block (b);
2003
                  if (!(mode & CLEANUP_CFGLAYOUT))
2004
                    changed = true;
2005
                  b = c;
2006
                }
2007
 
2008
              /* Remove code labels no longer used.  */
2009
              if (single_pred_p (b)
2010
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2011
                  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2012
                  && LABEL_P (BB_HEAD (b))
2013
                  /* If the previous block ends with a branch to this
2014
                     block, we can't delete the label.  Normally this
2015
                     is a condjump that is yet to be simplified, but
2016
                     if CASE_DROPS_THRU, this can be a tablejump with
2017
                     some element going to the same place as the
2018
                     default (fallthru).  */
2019
                  && (single_pred (b) == ENTRY_BLOCK_PTR
2020
                      || !JUMP_P (BB_END (single_pred (b)))
2021
                      || ! label_is_jump_target_p (BB_HEAD (b),
2022
                                                   BB_END (single_pred (b)))))
2023
                {
2024
                  rtx label = BB_HEAD (b);
2025
 
2026
                  delete_insn_chain (label, label);
2027
                  /* In case the label is undeletable, move it after the
2028
                     BASIC_BLOCK note.  */
2029
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2030
                    {
2031
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));
2032
 
2033
                      reorder_insns_nobb (label, label, bb_note);
2034
                      BB_HEAD (b) = bb_note;
2035
                    }
2036
                  if (dump_file)
2037
                    fprintf (dump_file, "Deleted label in block %i.\n",
2038
                             b->index);
2039
                }
2040
 
2041
              /* If we fall through an empty block, we can remove it.  */
2042
              if (!(mode & CLEANUP_CFGLAYOUT)
2043
                  && single_pred_p (b)
2044
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2045
                  && !LABEL_P (BB_HEAD (b))
2046
                  && FORWARDER_BLOCK_P (b)
2047
                  /* Note that forwarder_block_p being true ensures that
2048
                     there is a successor for this block.  */
2049
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2050
                  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2051
                {
2052
                  if (dump_file)
2053
                    fprintf (dump_file,
2054
                             "Deleting fallthru block %i.\n",
2055
                             b->index);
2056
 
2057
                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2058
                  redirect_edge_succ_nodup (single_pred_edge (b),
2059
                                            single_succ (b));
2060
                  delete_basic_block (b);
2061
                  changed = true;
2062
                  b = c;
2063
                }
2064
 
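              /* Try to merge B with its single successor C: the connecting
                 edge must not be complex, C must not be the exit block and
                 must have no other predecessors, and B and C must be
                 distinct blocks.  */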
2065
              if (single_succ_p (b)
2066
                  && (s = single_succ_edge (b))
2067
                  && !(s->flags & EDGE_COMPLEX)
2068
                  && (c = s->dest) != EXIT_BLOCK_PTR
2069
                  && single_pred_p (c)
2070
                  && b != c)
2071
                {
2072
                  /* When not in cfg_layout mode use code aware of reordering
2073
                     INSN.  This code possibly creates new basic blocks so it
2074
                     does not fit the merge_blocks interface and is kept here in the
2075
                     hope that it will become useless once more of the compiler
2076
                     is transformed to use cfg_layout mode.  */
2077
 
2078
                  if ((mode & CLEANUP_CFGLAYOUT)
2079
                      && can_merge_blocks_p (b, c))
2080
                    {
2081
                      merge_blocks (b, c);
2082
                      update_forwarder_flag (b);
2083
                      changed_here = true;
2084
                    }
2085
                  else if (!(mode & CLEANUP_CFGLAYOUT)
2086
                           /* If the jump insn has side effects,
2087
                              we can't kill the edge.  */
2088
                           && (!JUMP_P (BB_END (b))
2089
                               || (reload_completed
2090
                                   ? simplejump_p (BB_END (b))
2091
                                   : (onlyjump_p (BB_END (b))
2092
                                      && !tablejump_p (BB_END (b),
2093
                                                       NULL, NULL))))
2094
                           && (next = merge_blocks_move (s, b, c, mode)))
2095
                      {
2096
                        b = next;
2097
                        changed_here = true;
2098
                      }
2099
                }
2100
 
2101
              /* Simplify branch over branch.  */
2102
              if ((mode & CLEANUP_EXPENSIVE)
2103
                   && !(mode & CLEANUP_CFGLAYOUT)
2104
                   && try_simplify_condjump (b))
2105
                changed_here = true;
2106
 
2107
              /* If B has a single outgoing edge, but uses a
2108
                 non-trivial jump instruction without side-effects, we
2109
                 can either delete the jump entirely, or replace it
2110
                 with a simple unconditional jump.  */
2111
              if (single_succ_p (b)
2112
                  && single_succ (b) != EXIT_BLOCK_PTR
2113
                  && onlyjump_p (BB_END (b))
2114
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2115
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
2116
                                                     single_succ (b),
2117
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
2118
                {
2119
                  update_forwarder_flag (b);
2120
                  changed_here = true;
2121
                }
2122
 
2123
              /* Simplify branch to branch.  */
2124
              if (try_forward_edges (mode, b))
2125
                changed_here = true;
2126
 
2127
              /* Look for shared code between blocks.  */
2128
              if ((mode & CLEANUP_CROSSJUMP)
2129
                  && try_crossjump_bb (mode, b))
2130
                changed_here = true;
2131
 
2132
              /* Don't get confused by the index shift caused by
2133
                 deleting blocks.  */
2134
              if (!changed_here)
2135
                b = b->next_bb;
2136
              else
2137
                changed = true;
2138
            }
2139
 
2140
          if ((mode & CLEANUP_CROSSJUMP)
2141
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2142
            changed = true;
2143
 
2144
#ifdef ENABLE_CHECKING
2145
          if (changed)
2146
            verify_flow_info ();
2147
#endif
2148
 
2149
          changed_overall |= changed;
2150
          first_pass = false;
2151
        }
2152
      while (changed);
2153
    }
2154
 
2155
  if (mode & CLEANUP_CROSSJUMP)
2156
    remove_fake_exit_edges ();
2157
 
2158
  FOR_ALL_BB (b)
2159
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2160
 
2161
  return changed_overall;
2162
}
2163
 
2164
/* Delete all unreachable basic blocks.  */
2165
 
2166
bool
2167
delete_unreachable_blocks (void)
2168
{
2169
  bool changed = false;
2170
  basic_block b, next_bb;
2171
 
2172
  find_unreachable_blocks ();
2173
 
2174
  /* Delete all unreachable basic blocks.  */
2175
 
2176
  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2177
    {
2178
      next_bb = b->next_bb;
2179
 
2180
      if (!(b->flags & BB_REACHABLE))
2181
        {
2182
          delete_basic_block (b);
2183
          changed = true;
2184
        }
2185
    }
2186
 
2187
  if (changed)
2188
    tidy_fallthru_edges ();
2189
  return changed;
2190
}
2191
 
2192
/* Merges sequential blocks if possible.  */
2193
 
2194
bool
2195
merge_seq_blocks (void)
2196
{
2197
  basic_block bb;
2198
  bool changed = false;
2199
 
2200
  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2201
    {
2202
      if (single_succ_p (bb)
2203
          && can_merge_blocks_p (bb, single_succ (bb)))
2204
        {
2205
          /* Merge the blocks and retry.  */
2206
          merge_blocks (bb, single_succ (bb));
2207
          changed = true;
2208
          continue;
2209
        }
2210
 
2211
      bb = bb->next_bb;
2212
    }
2213
 
2214
  return changed;
2215
}
2216
 
2217
/* Tidy the CFG by deleting unreachable code and whatnot.  */
2218
 
2219
bool
2220
cleanup_cfg (int mode)
2221
{
2222
  bool changed = false;
2223
 
2224
  timevar_push (TV_CLEANUP_CFG);
2225
  if (delete_unreachable_blocks ())
2226
    {
2227
      changed = true;
2228
      /* We've possibly created trivially dead code.  Clean it up right
2229
         now to introduce more opportunities for try_optimize_cfg.  */
2230
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2231
          && !reload_completed)
2232
        delete_trivially_dead_insns (get_insns(), max_reg_num ());
2233
    }
2234
 
2235
  compact_blocks ();
2236
 
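  /* Iterate the CFG optimizations, removing unreachable blocks and
     (depending on MODE) dead code after each pass, until no further
     progress is made.  */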
2237
  while (try_optimize_cfg (mode))
2238
    {
2239
      delete_unreachable_blocks (), changed = true;
2240
      if (mode & CLEANUP_UPDATE_LIFE)
2241
        {
2242
          /* Cleaning up the CFG introduces more opportunities for dead code
2243
             removal that in turn may introduce more opportunities for
2244
             cleaning up the CFG.  */
2245
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2246
                                                 PROP_DEATH_NOTES
2247
                                                 | PROP_SCAN_DEAD_CODE
2248
                                                 | PROP_KILL_DEAD_CODE
2249
                                                 | ((mode & CLEANUP_LOG_LINKS)
2250
                                                    ? PROP_LOG_LINKS : 0)))
2251
            break;
2252
        }
2253
      else if (!(mode & CLEANUP_NO_INSN_DEL)
2254
               && (mode & CLEANUP_EXPENSIVE)
2255
               && !reload_completed)
2256
        {
2257
          if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2258
            break;
2259
        }
2260
      else
2261
        break;
2262
      delete_dead_jumptables ();
2263
    }
2264
 
2265
  timevar_pop (TV_CLEANUP_CFG);
2266
 
2267
  return changed;
2268
}
2269
 
2270
static unsigned int
2271
rest_of_handle_jump (void)
2272
{
2273
  delete_unreachable_blocks ();
2274
 
2275
  if (cfun->tail_call_emit)
2276
    fixup_tail_calls ();
2277
  return 0;
2278
}
2279
 
2280
struct tree_opt_pass pass_jump =
2281
{
2282
  "sibling",                            /* name */
2283
  NULL,                                 /* gate */
2284
  rest_of_handle_jump,                  /* execute */
2285
  NULL,                                 /* sub */
2286
  NULL,                                 /* next */
2287
  0,                                    /* static_pass_number */
2288
  TV_JUMP,                              /* tv_id */
2289
  0,                                    /* properties_required */
2290
  0,                                    /* properties_provided */
2291
  0,                                    /* properties_destroyed */
2292
  TODO_ggc_collect,                     /* todo_flags_start */
2293
  TODO_dump_func |
2294
  TODO_verify_flow,                     /* todo_flags_finish */
2295
  'i'                                   /* letter */
2296
};
2297
 
2298
 
2299
static unsigned int
2300
rest_of_handle_jump2 (void)
2301
{
2302
  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB.  Do this
2303
     before jump optimization switches branch directions.  */
2304
  if (flag_guess_branch_prob)
2305
    expected_value_to_br_prob ();
2306
 
2307
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
2308
  reg_scan (get_insns (), max_reg_num ());
2309
  if (dump_file)
2310
    dump_flow_info (dump_file, dump_flags);
2311
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2312
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2313
 
2314
  purge_line_number_notes ();
2315
 
2316
  if (optimize)
2317
    cleanup_cfg (CLEANUP_EXPENSIVE);
2318
 
2319
  /* Jump optimization, and the removal of NULL pointer checks, may
2320
     have reduced the number of instructions substantially.  CSE, and
2321
     future passes, allocate arrays whose dimensions involve the
2322
     maximum instruction UID, so if we can reduce the maximum UID
2323
     we'll save big on memory.  */
2324
  renumber_insns ();
2325
  return 0;
2326
}
2327
 
2328
 
2329
struct tree_opt_pass pass_jump2 =
2330
{
2331
  "jump",                               /* name */
2332
  NULL,                                 /* gate */
2333
  rest_of_handle_jump2,                 /* execute */
2334
  NULL,                                 /* sub */
2335
  NULL,                                 /* next */
2336
  0,                                    /* static_pass_number */
2337
  TV_JUMP,                              /* tv_id */
2338
  0,                                    /* properties_required */
2339
  0,                                    /* properties_provided */
2340
  0,                                    /* properties_destroyed */
2341
  TODO_ggc_collect,                     /* todo_flags_start */
2342
  TODO_dump_func,                       /* todo_flags_finish */
2343
  'j'                                   /* letter */
2344
};
2345
 
2346
 
