/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable blocks removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
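
/* As a rough illustration only (schematic, not taken from any particular
   test case): a forwarder block is an otherwise empty block whose only
   purpose is to jump elsewhere, e.g.

       B1: if (x) goto B2;          B1: if (x) goto B3;
           ...                 =>       ...
       B2: goto B3;                 (B2 becomes unreachable and is removed)

   Edge forwarding redirects the edge B1->B2 straight to B3, after which
   unreachable block removal deletes B2.  */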

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "df.h"
#include "dce.h"
#include "dbgcnt.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running first pass of try_optimize_cfg loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);

/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
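
/* For illustration only (schematic shape, assuming invert_jump succeeds),
   try_simplify_condjump rewrites

       B1: if (cond) goto B3;        B1: if (!cond) goto B4;
       B2: goto B4;             =>       (fall through to B3)
       B3: ...                       B3: ...

   i.e. the conditional branch is inverted to target the destination of
   the unconditional jump, and the jump-only block B2 is deleted.  */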

/* Attempt to prove that the operation is a NOOP using CSElib, or mark its
   effect on the register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          if (HARD_REGISTER_NUM_P (regno))
            bitmap_clear_range (nonequal, regno,
                                hard_regno_nregs[regno][GET_MODE (dest)]);
          else
            bitmap_clear_bit (nonequal, regno);
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      if (HARD_REGISTER_NUM_P (regno))
        bitmap_set_range (nonequal, regno,
                          hard_regno_nregs[regno][GET_MODE (dest)]);
      else
        bitmap_set_bit (nonequal, regno);
      return false;

    default:
      return false;
    }
}
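
/* Sketch of how mark_effect treats individual patterns (illustrative RTL,
   register numbers invented):

     (clobber (reg:SI 3))          -> reg 3 cleared from NONEQUAL
     (set (reg:SI 3) (reg:SI 3))   -> a no-op for cselib, nothing marked
     (set (reg:SI 3) (reg:SI 4))   -> reg 3 marked in NONEQUAL
     (set (mem:SI ...) (reg:SI 4)) -> returns true, thread_jump gives up  */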

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
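
/* A source-level sketch of what thread_jump proves (illustrative only):

     if (a > 10) goto L;    ...    L: if (a > 0) goto M;

   When block L has no side effects, the condition a > 10 that holds on
   the taken edge dominates a > 0, so the edge into L can be redirected
   straight to M without executing L's comparison at run time.  */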

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter, goto_locus;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed to forward fallthru
         edge to the same place as the branch edge of conditional branch
         and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return false;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= (target->flags & BB_MODIFIED) != 0;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks;
              else if (!optimize)
                {
                  /* When not optimizing, ensure that edges or forwarder
                     blocks with different locus are not optimized out.  */
                  int new_locus = single_succ_edge (target)->goto_locus;
                  int locus = goto_locus;

                  if (new_locus && locus && !locator_eq (new_locus, locus))
                    new_target = NULL;
                  else
                    {
                      rtx last;

                      if (new_locus)
                        locus = new_locus;

                      last = BB_END (target);
                      if (DEBUG_INSN_P (last))
                        last = prev_nondebug_insn (last);

                      new_locus = last && INSN_P (last)
                                  ? INSN_LOCATOR (last) : 0;

                      if (new_locus && locus && !locator_eq (new_locus, locus))
                        new_target = NULL;
                      else
                        {
                          if (new_locus)
                            locus = new_locus;

                          goto_locus = locus;
                        }
                    }
                }
            }

          /* Allow to thread only over one edge at time to simplify updating
             of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge, n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          e->goto_locus = goto_locus;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
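
/* The COUNTER/n_basic_blocks limit above is what stops the walk on a
   cycle of forwarder blocks.  Schematically (illustrative only):

     B1: goto B2;   B2: goto B3;   B3: goto B1;

   Following successors from B1 never reaches a non-forwarder block, so
   after n_basic_blocks steps the loop gives up and reports
   "Infinite loop in BB %i" in the dump file.  */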


/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves the basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
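
/* Illustrative summary of the cases handled above (schematic): given an
   edge B -> C,
   - B falls through to C: merge the two blocks in place;
   - otherwise, with CLEANUP_EXPENSIVE, move C after B when C has no
     outgoing fallthru; failing that, move B before C, first turning any
     fallthru into B into an explicit jump;
   - give up when either block is a forwarder, the partitions differ, or
     the fallthru into B comes from the entry block.  */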


/* Removes the memory attributes of MEM expression
   if they are not equal.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          HOST_WIDE_INT mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              clear_mem_offset (x);
              clear_mem_offset (y);
            }
          else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
                   || (MEM_OFFSET_KNOWN_P (x)
                       && MEM_OFFSET (x) != MEM_OFFSET (y)))
            {
              clear_mem_offset (x);
              clear_mem_offset (y);
            }

          if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
            {
              mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
              set_mem_size (x, mem_size);
              set_mem_size (y, mem_size);
            }
          else
            {
              clear_mem_size (x);
              clear_mem_size (y);
            }

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
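
/* Worked example of the unification above (attribute values invented for
   illustration, same MEM_EXPR assumed): if X has [alias set 2, size 4,
   align 32] and Y has [alias set 3, size 8, align 8], then after
   merge_memattrs both end up with alias set 0 (the sets differed),
   size 8 (MAX of the known sizes) and alignment 8 (MIN of the
   alignments).  */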


/* Checks if patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx i1, rtx i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);
  if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
      || !CONST_INT_P (XEXP (note1, 0)))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}
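
/* Illustrative example for can_replace_by (register numbers and notes
   invented for the sketch):

     i1: (set (reg:SI 3) (plus:SI (reg:SI 4) (const_int 1)))
           carrying a REG_EQUAL note (const_int 7)
     i2: (set (reg:SI 3) (const_int 7))
           also carrying a REG_EQUAL note (const_int 7)

   Both insns set reg 3 to 7, and i2's source is already a CONST_INT, so
   i1 can be replaced by i2 (dir_forward) but not the other way around:
   at i2's location reg 4 may no longer hold the value 6.  */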

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
        return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
        return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
        return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
        return dir_none;
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
        return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx i1, rtx i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}
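
/* For instance (hypothetical notes): if I1 carries REG_EQUAL (const_int 4)
   and I2 carries REG_EQUAL (const_int 8), keeping either note would be
   wrong on the merged code path, so merge_notes removes both.  If only
   one of the insns has a note, just that note is dropped.  */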

/* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
                      enum replace_direction *dir_p)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;
  rtx p1;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.
         [bb A] insn1            [bb C] insn1
                insn2                   insn2
         [bb B] insn3                   insn3
                insn4                   insn4
                jump_insn               jump_insn

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't allow
         fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          afterlast_dir = last_dir;
          last_dir = dir;
          p1 = PATTERN (i1);
          if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
            ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}
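
/* Sketch of a typical call (illustrative; the crossjump code later in this
   file uses it in roughly this shape):

     rtx f1, f2;
     enum replace_direction dir = dir_both;
     int nmatch = flow_find_cross_jump (bb1, bb2, &f1, &f2, &dir);

   On return NMATCH is the length of the matching tail sequence, F1/F2
   point at its first insns in each block, and DIR says in which direction
   the matched insns may be replaced.  */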

/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
                                  rtx *f2, int stop_after)
{
  rtx i1, i2, last1, last2, beforelast1, beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
        {
          if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i1 = NEXT_INSN (i1);
        }

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
        {
          if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i2 = NEXT_INSN (i2);
        }

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
          || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
        break;

      if (NOTE_P (i1) || NOTE_P (i2)
          || JUMP_P (i1) || JUMP_P (i2))
        break;

      /* A sanity check to make sure we're not merging insns with different
         effects on EH.  If only one of them ends a basic block, it shouldn't
         have an EH edge; if both end a basic block, there should be the same
         number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
           && nehedges1 > 0)
          || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
              && nehedges2 > 0)
          || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
              && nehedges1 != nehedges2))
        break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          beforelast1 = last1, beforelast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
          || (stop_after > 0 && ninsns == stop_after))
        break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;
#endif

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before end of the basic block, the semantic remains unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the EXIT_BLOCK_PTR can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
      && !JUMP_P (BB_END (bb1))
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
            && (single_succ_edge (bb2)->flags
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;
1561
 
1562
      cond1 = XEXP (SET_SRC (set1), 0);
1563
      cond2 = XEXP (SET_SRC (set2), 0);
1564
      code1 = GET_CODE (cond1);
1565
      if (reverse)
1566
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
1567
      else
1568
        code2 = GET_CODE (cond2);
1569
 
1570
      if (code2 == UNKNOWN)
1571
        return false;
1572
 
1573
      /* Verify codes and operands match.  */
1574
      match = ((code1 == code2
1575
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1576
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1577
               || (code1 == swap_condition (code2)
1578
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1579
                                              XEXP (cond2, 0))
1580
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1581
                                              XEXP (cond2, 1))));
1582
 
1583
      /* If we return true, we will join the blocks, which means that
1584
         we will only have one branch prediction bit to work with.  Thus
1585
         we require the existing branches to have probabilities that are
1586
         roughly similar.  */
1587
      if (match
1588
          && optimize_bb_for_speed_p (bb1)
1589
          && optimize_bb_for_speed_p (bb2))
1590
        {
1591
          int prob2;
1592
 
1593
          if (b1->dest == b2->dest)
1594
            prob2 = b2->probability;
1595
          else
1596
            /* Do not use f2 probability as f2 may be forwarded.  */
1597
            prob2 = REG_BR_PROB_BASE - b2->probability;
1598
 
1599
          /* Fail if the difference in probabilities is greater than 50%.
1600
             This rules out two well-predicted branches with opposite
1601
             outcomes.  */
1602
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1603
            {
1604
              if (dump_file)
1605
                fprintf (dump_file,
1606
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1607
                         bb1->index, bb2->index, b1->probability, prob2);
1608
 
1609
              return false;
1610
            }
1611
        }
1612
 
1613
      if (dump_file && match)
1614
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1615
                 bb1->index, bb2->index);
1616
 
1617
      return match;
1618
    }
1619
 
1620
  /* Generic case - we are seeing a computed jump, table jump or trapping
1621
     instruction.  */
1622
 
1623
  /* Check whether there are tablejumps at the end of BB1 and BB2.
1624
     Return true if they are identical.  */
1625
    {
1626
      rtx label1, label2;
1627
      rtx table1, table2;
1628
 
1629
      if (tablejump_p (BB_END (bb1), &label1, &table1)
1630
          && tablejump_p (BB_END (bb2), &label2, &table2)
1631
          && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1632
        {
1633
          /* The labels should never be the same rtx.  If they really are the same,
1634
             the jump tables are the same too.  So disable crossjumping of blocks BB1
1635
             and BB2 because when deleting the common insns in the end of BB1
1636
             by delete_basic_block () the jump table would be deleted too.  */
1637
          /* If LABEL2 is referenced in BB1->END do not do anything
1638
             because we would lose information when replacing
1639
             LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1640
          if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1641
            {
1642
              /* Set IDENTICAL to true when the tables are identical.  */
1643
              bool identical = false;
1644
              rtx p1, p2;
1645
 
1646
              p1 = PATTERN (table1);
1647
              p2 = PATTERN (table2);
1648
              if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1649
                {
1650
                  identical = true;
1651
                }
1652
              else if (GET_CODE (p1) == ADDR_DIFF_VEC
1653
                       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1654
                       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1655
                       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1656
                {
1657
                  int i;
1658
 
1659
                  identical = true;
1660
                  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1661
                    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1662
                      identical = false;
1663
                }
1664
 
1665
              if (identical)
1666
                {
1667
                  replace_label_data rr;
1668
                  bool match;
1669
 
1670
                  /* Temporarily replace references to LABEL1 with LABEL2
1671
                     in BB1->END so that we could compare the instructions.  */
1672
                  rr.r1 = label1;
1673
                  rr.r2 = label2;
1674
                  rr.update_label_nuses = false;
1675
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1676
 
1677
                  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1678
                           == dir_both);
1679
                  if (dump_file && match)
1680
                    fprintf (dump_file,
1681
                             "Tablejumps in bb %i and %i match.\n",
1682
                             bb1->index, bb2->index);
1683
 
1684
                  /* Set the original label in BB1->END because when deleting
1685
                     a block whose end is a tablejump, the tablejump referenced
1686
                     from the instruction is deleted too.  */
1687
                  rr.r1 = label2;
1688
                  rr.r2 = label1;
1689
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1690
 
1691
                  return match;
1692
                }
1693
            }
1694
          return false;
1695
        }
1696
    }
1697
 
1698
  /* First ensure that the instructions match.  There may be many outgoing
1699
     edges so this test is generally cheaper.  */
1700
  if (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) != dir_both)
1701
    return false;
1702
 
1703
  /* Search the outgoing edges, ensure that the counts do match, find possible
1704
     fallthru and exception handling edges since these need more
1705
     validation.  */
1706
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1707
    return false;
1708
 
1709
  FOR_EACH_EDGE (e1, ei, bb1->succs)
1710
    {
1711
      e2 = EDGE_SUCC (bb2, ei.index);
1712
 
1713
      if (e1->flags & EDGE_EH)
1714
        nehedges1++;
1715
 
1716
      if (e2->flags & EDGE_EH)
1717
        nehedges2++;
1718
 
1719
      if (e1->flags & EDGE_FALLTHRU)
1720
        fallthru1 = e1;
1721
      if (e2->flags & EDGE_FALLTHRU)
1722
        fallthru2 = e2;
1723
    }
1724
 
1725
  /* If the number of edges of the various types does not match, fail.  */
1726
  if (nehedges1 != nehedges2
1727
      || (fallthru1 != 0) != (fallthru2 != 0))
1728
    return false;
1729
 
1730
  /* Fallthru edges must be forwarded to the same destination.  */
1731
  if (fallthru1)
1732
    {
1733
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
1734
                        ? single_succ (fallthru1->dest): fallthru1->dest);
1735
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
1736
                        ? single_succ (fallthru2->dest): fallthru2->dest);
1737
 
1738
      if (d1 != d2)
1739
        return false;
1740
    }
1741
 
1742
  /* Ensure the same EH region.  */
1743
  {
1744
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1745
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1746
 
1747
    if (!n1 && n2)
1748
      return false;
1749
 
1750
    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1751
      return false;
1752
  }
1753
 
1754
  /* The same checks as in try_crossjump_to_edge.  They are required for the
1755
     RTL version of sequence abstraction.  */
1756
  FOR_EACH_EDGE (e1, ei, bb2->succs)
1757
    {
1758
      edge e2;
1759
      edge_iterator ei;
1760
      basic_block d1 = e1->dest;
1761
 
1762
      if (FORWARDER_BLOCK_P (d1))
1763
        d1 = EDGE_SUCC (d1, 0)->dest;
1764
 
1765
      FOR_EACH_EDGE (e2, ei, bb1->succs)
1766
        {
1767
          basic_block d2 = e2->dest;
1768
          if (FORWARDER_BLOCK_P (d2))
1769
            d2 = EDGE_SUCC (d2, 0)->dest;
1770
          if (d1 == d2)
1771
            break;
1772
        }
1773
 
1774
      if (!e2)
1775
        return false;
1776
    }
1777
 
1778
  return true;
1779
}
1780
 
1781
/* Returns true if basic block BB has a preserve label.  */
1782
 
1783
static bool
1784
block_has_preserve_label (basic_block bb)
1785
{
1786
  return (bb
1787
          && block_label (bb)
1788
          && LABEL_PRESERVE_P (block_label (bb)));
1789
}
1790
 
1791
/* E1 and E2 are edges with the same destination block.  Search their
1792
   predecessors for common code.  If found, redirect control flow from
1793
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1794
   or the other way around (dir_backward).  DIR specifies the allowed
1795
   replacement direction.  */
1796
 
1797
static bool
1798
try_crossjump_to_edge (int mode, edge e1, edge e2,
1799
                       enum replace_direction dir)
1800
{
1801
  int nmatch;
1802
  basic_block src1 = e1->src, src2 = e2->src;
1803
  basic_block redirect_to, redirect_from, to_remove;
1804
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
1805
  rtx newpos1, newpos2;
1806
  edge s;
1807
  edge_iterator ei;
1808
 
1809
  newpos1 = newpos2 = NULL_RTX;
1810
 
1811
  /* If we have partitioned hot/cold basic blocks, it is a bad idea
1812
     to try this optimization.
1813
 
1814
     Basic block partitioning may result in some jumps that appear to
1815
     be optimizable (or blocks that appear to be mergeable), but which really
1816
     must be left untouched (they are required to make it safely across
1817
     partition boundaries).  See the comments at the top of
1818
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1819
 
1820
  if (flag_reorder_blocks_and_partition && reload_completed)
1821
    return false;
1822
 
1823
  /* Search backward through forwarder blocks.  We don't need to worry
1824
     about multiple entry or chained forwarders, as they will be optimized
1825
     away.  We do this to look past the unconditional jump following a
1826
     conditional jump that is required due to the current CFG shape.  */
1827
  if (single_pred_p (src1)
1828
      && FORWARDER_BLOCK_P (src1))
1829
    e1 = single_pred_edge (src1), src1 = e1->src;
1830
 
1831
  if (single_pred_p (src2)
1832
      && FORWARDER_BLOCK_P (src2))
1833
    e2 = single_pred_edge (src2), src2 = e2->src;
1834
 
1835
  /* Nothing to do if we reach ENTRY, or a common source block.  */
1836
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1837
    return false;
1838
  if (src1 == src2)
1839
    return false;
1840
 
1841
  /* Seeing more than one forwarder block would confuse us later...  */
1842
  if (FORWARDER_BLOCK_P (e1->dest)
1843
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1844
    return false;
1845
 
1846
  if (FORWARDER_BLOCK_P (e2->dest)
1847
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1848
    return false;
1849
 
1850
  /* Likewise with dead code (possibly newly created by the other optimizations
1851
     of cfg_cleanup).  */
1852
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1853
    return false;
1854
 
1855
  /* Look for the common insn sequence, part the first ...  */
1856
  if (!outgoing_edges_match (mode, src1, src2))
1857
    return false;
1858
 
1859
  /* ... and part the second.  */
1860
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1861
 
1862
  osrc1 = src1;
1863
  osrc2 = src2;
1864
  if (newpos1 != NULL_RTX)
1865
    src1 = BLOCK_FOR_INSN (newpos1);
1866
  if (newpos2 != NULL_RTX)
1867
    src2 = BLOCK_FOR_INSN (newpos2);
1868
 
1869
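  /* If flow_find_cross_jump decided that the only valid replacement
     direction is backward, exchange the two blocks (and the matching
     positions) so that the code below can always redirect SRC1 to SRC2.  */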
  if (dir == dir_backward)
1870
    {
1871
#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1872
      SWAP (basic_block, osrc1, osrc2);
1873
      SWAP (basic_block, src1, src2);
1874
      SWAP (edge, e1, e2);
1875
      SWAP (rtx, newpos1, newpos2);
1876
#undef SWAP
1877
    }
1878
 
1879
  /* Don't proceed with the crossjump unless we found a sufficient number
1880
     of matching instructions or the 'from' block was totally matched
1881
     (such that its predecessors will hopefully be redirected and the
1882
     block removed).  */
1883
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1884
      && (newpos1 != BB_HEAD (src1)))
1885
    return false;
1886
 
1887
  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1888
  if (block_has_preserve_label (e1->dest)
1889
      && (e1->flags & EDGE_ABNORMAL))
1890
    return false;
1891
 
1892
  /* Here we know that the insns at the end of SRC1 which are common with SRC2
1893
     will be deleted.
1894
     If we have tablejumps at the end of SRC1 and SRC2,
1895
     they have already been compared for equivalence in outgoing_edges_match (),
1896
     so replace the references to TABLE1 by references to TABLE2.  */
1897
    {
1898
      rtx label1, label2;
1899
      rtx table1, table2;
1900
 
1901
      if (tablejump_p (BB_END (osrc1), &label1, &table1)
1902
          && tablejump_p (BB_END (osrc2), &label2, &table2)
1903
          && label1 != label2)
1904
        {
1905
          replace_label_data rr;
1906
          rtx insn;
1907
 
1908
          /* Replace references to LABEL1 with LABEL2.  */
1909
          rr.r1 = label1;
1910
          rr.r2 = label2;
1911
          rr.update_label_nuses = true;
1912
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1913
            {
1914
              /* Do not replace the label in SRC1->END because when deleting
1915
                 a block whose end is a tablejump, the tablejump referenced
1916
                 from the instruction is deleted too.  */
1917
              if (insn != BB_END (osrc1))
1918
                for_each_rtx (&insn, replace_label, &rr);
1919
            }
1920
        }
1921
    }
1922
 
1923
  /* Avoid splitting if possible.  We must always split when SRC2 has
1924
     EH predecessor edges, or we may end up with basic blocks with both
1925
     normal and EH predecessor edges.  */
1926
  if (newpos2 == BB_HEAD (src2)
1927
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1928
    redirect_to = src2;
1929
  else
1930
    {
1931
      if (newpos2 == BB_HEAD (src2))
1932
        {
1933
          /* Skip possible basic block header.  */
1934
          if (LABEL_P (newpos2))
1935
            newpos2 = NEXT_INSN (newpos2);
1936
          while (DEBUG_INSN_P (newpos2))
1937
            newpos2 = NEXT_INSN (newpos2);
1938
          if (NOTE_P (newpos2))
1939
            newpos2 = NEXT_INSN (newpos2);
1940
          while (DEBUG_INSN_P (newpos2))
1941
            newpos2 = NEXT_INSN (newpos2);
1942
        }
1943
 
1944
      if (dump_file)
1945
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
1946
                 src2->index, nmatch);
1947
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1948
    }
1949
 
1950
  if (dump_file)
1951
    fprintf (dump_file,
1952
             "Cross jumping from bb %i to bb %i; %i common insns\n",
1953
             src1->index, src2->index, nmatch);
1954
 
1955
  /* We may have some registers visible through the block.  */
1956
  df_set_bb_dirty (redirect_to);
1957
 
1958
  if (osrc2 == src2)
1959
    redirect_edges_to = redirect_to;
1960
  else
1961
    redirect_edges_to = osrc2;
1962
 
1963
  /* Recompute the frequencies and counts of outgoing edges.  */
1964
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
1965
    {
1966
      edge s2;
1967
      edge_iterator ei;
1968
      basic_block d = s->dest;
1969
 
1970
      if (FORWARDER_BLOCK_P (d))
1971
        d = single_succ (d);
1972
 
1973
      FOR_EACH_EDGE (s2, ei, src1->succs)
1974
        {
1975
          basic_block d2 = s2->dest;
1976
          if (FORWARDER_BLOCK_P (d2))
1977
            d2 = single_succ (d2);
1978
          if (d == d2)
1979
            break;
1980
        }
1981
 
1982
      s->count += s2->count;
1983
 
1984
      /* Take care to update possible forwarder blocks.  We verified
1985
         that there is no more than one in the chain, so we can't run
1986
         into an infinite loop.  */
1987
      if (FORWARDER_BLOCK_P (s->dest))
1988
        {
1989
          single_succ_edge (s->dest)->count += s2->count;
1990
          s->dest->count += s2->count;
1991
          s->dest->frequency += EDGE_FREQUENCY (s);
1992
        }
1993
 
1994
      if (FORWARDER_BLOCK_P (s2->dest))
1995
        {
1996
          single_succ_edge (s2->dest)->count -= s2->count;
1997
          if (single_succ_edge (s2->dest)->count < 0)
1998
            single_succ_edge (s2->dest)->count = 0;
1999
          s2->dest->count -= s2->count;
2000
          s2->dest->frequency -= EDGE_FREQUENCY (s);
2001
          if (s2->dest->frequency < 0)
2002
            s2->dest->frequency = 0;
2003
          if (s2->dest->count < 0)
2004
            s2->dest->count = 0;
2005
        }
2006
 
2007
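      /* Combine the probabilities of the two edges as an average weighted
         by the source block frequencies; fall back to a plain average when
         both frequencies are zero.  */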
      if (!redirect_edges_to->frequency && !src1->frequency)
2008
        s->probability = (s->probability + s2->probability) / 2;
2009
      else
2010
        s->probability
2011
          = ((s->probability * redirect_edges_to->frequency +
2012
              s2->probability * src1->frequency)
2013
             / (redirect_edges_to->frequency + src1->frequency));
2014
    }
2015
 
2016
  /* Adjust count and frequency for the block.  An earlier jump
2017
     threading pass may have left the profile in an inconsistent
2018
     state (see update_bb_profile_for_threading) so we must be
2019
     prepared for overflows.  */
2020
  tmp = redirect_to;
2021
  do
2022
    {
2023
      tmp->count += src1->count;
2024
      tmp->frequency += src1->frequency;
2025
      if (tmp->frequency > BB_FREQ_MAX)
2026
        tmp->frequency = BB_FREQ_MAX;
2027
      if (tmp == redirect_edges_to)
2028
        break;
2029
      tmp = find_fallthru_edge (tmp->succs)->dest;
2030
    }
2031
  while (true);
2032
  update_br_prob_note (redirect_edges_to);
2033
 
2034
  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2035
 
2036
  /* Skip possible basic block header.  */
2037
  if (LABEL_P (newpos1))
2038
    newpos1 = NEXT_INSN (newpos1);
2039
 
2040
  while (DEBUG_INSN_P (newpos1))
2041
    newpos1 = NEXT_INSN (newpos1);
2042
 
2043
  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2044
    newpos1 = NEXT_INSN (newpos1);
2045
 
2046
  while (DEBUG_INSN_P (newpos1))
2047
    newpos1 = NEXT_INSN (newpos1);
2048
 
2049
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2050
  to_remove = single_succ (redirect_from);
2051
 
2052
  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2053
  delete_basic_block (to_remove);
2054
 
2055
  update_forwarder_flag (redirect_from);
2056
  if (redirect_to != src2)
2057
    update_forwarder_flag (src2);
2058
 
2059
  return true;
2060
}
2061
 
2062
/* Search the predecessors of BB for common insn sequences.  When found,
2063
   share code between them by redirecting control flow.  Return true if
2064
   any changes were made.  */
2065
 
2066
static bool
2067
try_crossjump_bb (int mode, basic_block bb)
2068
{
2069
  edge e, e2, fallthru;
2070
  bool changed;
2071
  unsigned max, ix, ix2;
2072
 
2073
  /* Nothing to do if there are not at least two incoming edges.  */
2074
  if (EDGE_COUNT (bb->preds) < 2)
2075
    return false;
2076
 
2077
  /* Don't crossjump if this block ends in a computed jump,
2078
     unless we are optimizing for size.  */
2079
  if (optimize_bb_for_size_p (bb)
2080
      && bb != EXIT_BLOCK_PTR
2081
      && computed_jump_p (BB_END (bb)))
2082
    return false;
2083
 
2084
  /* If we are partitioning hot/cold basic blocks, we don't want to
2085
     mess up unconditional or indirect jumps that cross between hot
2086
     and cold sections.
2087
 
2088
     Basic block partitioning may result in some jumps that appear to
2089
     be optimizable (or blocks that appear to be mergeable), but which really
2090
     must be left untouched (they are required to make it safely across
2091
     partition boundaries).  See the comments at the top of
2092
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2093
 
2094
  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2095
                                        BB_PARTITION (EDGE_PRED (bb, 1)->src)
2096
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2097
    return false;
2098
 
2099
  /* It is always cheapest to redirect a block that ends in a branch to
2100
     a block that falls through into BB, as that adds no branches to the
2101
     program.  We'll try that combination first.  */
2102
  fallthru = NULL;
2103
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2104
 
2105
  if (EDGE_COUNT (bb->preds) > max)
2106
    return false;
2107
 
2108
  fallthru = find_fallthru_edge (bb->preds);
2109
 
2110
  changed = false;
2111
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2112
    {
2113
      e = EDGE_PRED (bb, ix);
2114
      ix++;
2115
 
2116
      /* As noted above, first try with the fallthru predecessor (or, a
2117
         fallthru predecessor if we are in cfglayout mode).  */
2118
      if (fallthru)
2119
        {
2120
          /* Don't combine the fallthru edge into anything else.
2121
             If there is a match, we'll do it the other way around.  */
2122
          if (e == fallthru)
2123
            continue;
2124
          /* If nothing changed since the last attempt, there is nothing
2125
             we can do.  */
2126
          if (!first_pass
2127
              && !((e->src->flags & BB_MODIFIED)
2128
                   || (fallthru->src->flags & BB_MODIFIED)))
2129
            continue;
2130
 
2131
          if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2132
            {
2133
              changed = true;
2134
              ix = 0;
2135
              continue;
2136
            }
2137
        }
2138
 
2139
      /* Non-obvious work limiting check: Recognize that we're going
2140
         to call try_crossjump_bb on every basic block.  So if we have
2141
         two blocks with lots of outgoing edges (a switch) and they
2142
         share lots of common destinations, then we would do the
2143
         cross-jump check once for each common destination.
2144
 
2145
         Now, if the blocks actually are cross-jump candidates, then
2146
         all of their destinations will be shared, which means that
2147
         we only need to check them for cross-jump candidacy once.  We
2148
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
2149
         choosing to do the check from the block for which the edge
2150
         in question is the first successor of A.  */
2151
      if (EDGE_SUCC (e->src, 0) != e)
2152
        continue;
2153
 
2154
      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2155
        {
2156
          e2 = EDGE_PRED (bb, ix2);
2157
 
2158
          if (e2 == e)
2159
            continue;
2160
 
2161
          /* We've already checked the fallthru edge above.  */
2162
          if (e2 == fallthru)
2163
            continue;
2164
 
2165
          /* The "first successor" check above only prevents multiple
2166
             checks of crossjump(A,B).  In order to prevent redundant
2167
             checks of crossjump(B,A), require that A be the block
2168
             with the lowest index.  */
2169
          if (e->src->index > e2->src->index)
2170
            continue;
2171
 
2172
          /* If nothing changed since the last attempt, there is nothing
2173
             we can do.  */
2174
          if (!first_pass
2175
              && !((e->src->flags & BB_MODIFIED)
2176
                   || (e2->src->flags & BB_MODIFIED)))
2177
            continue;
2178
 
2179
          /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2180
             direction.  */
2181
          if (try_crossjump_to_edge (mode, e, e2, dir_both))
2182
            {
2183
              changed = true;
2184
              ix = 0;
2185
              break;
2186
            }
2187
        }
2188
    }
2189
 
2190
  if (changed)
2191
    crossjumps_occured = true;
2192
 
2193
  return changed;
2194
}
2195
 
2196
/* Search the successors of BB for common insn sequences.  When found,
2197
   share code between them by moving it across the basic block
2198
   boundary.  Return true if any changes were made.  */
2199
 
2200
static bool
2201
try_head_merge_bb (basic_block bb)
2202
{
2203
  basic_block final_dest_bb = NULL;
2204
  int max_match = INT_MAX;
2205
  edge e0;
2206
  rtx *headptr, *currptr, *nextptr;
2207
  bool changed, moveall;
2208
  unsigned ix;
2209
  rtx e0_last_head, cond, move_before;
2210
  unsigned nedges = EDGE_COUNT (bb->succs);
2211
  rtx jump = BB_END (bb);
2212
  regset live, live_union;
2213
 
2214
  /* Nothing to do if there are not at least two outgoing edges.  */
2215
  if (nedges < 2)
2216
    return false;
2217
 
2218
  /* Don't crossjump if this block ends in a computed jump,
2219
     unless we are optimizing for size.  */
2220
  if (optimize_bb_for_size_p (bb)
2221
      && bb != EXIT_BLOCK_PTR
2222
      && computed_jump_p (BB_END (bb)))
2223
    return false;
2224
 
2225
  cond = get_condition (jump, &move_before, true, false);
2226
  if (cond == NULL_RTX)
2227
    {
2228
#ifdef HAVE_cc0
2229
      if (reg_mentioned_p (cc0_rtx, jump))
2230
        move_before = prev_nonnote_nondebug_insn (jump);
2231
      else
2232
#endif
2233
        move_before = jump;
2234
    }
2235
 
2236
  for (ix = 0; ix < nedges; ix++)
2237
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
2238
      return false;
2239
 
2240
  for (ix = 0; ix < nedges; ix++)
2241
    {
2242
      edge e = EDGE_SUCC (bb, ix);
2243
      basic_block other_bb = e->dest;
2244
 
2245
      if (df_get_bb_dirty (other_bb))
2246
        {
2247
          block_was_dirty = true;
2248
          return false;
2249
        }
2250
 
2251
      if (e->flags & EDGE_ABNORMAL)
2252
        return false;
2253
 
2254
      /* Normally, all destination blocks must only be reachable from this
2255
         block, i.e. they must have one incoming edge.
2256
 
2257
         There is one special case we can handle, that of multiple consecutive
2258
         jumps where the first jumps to one of the targets of the second jump.
2259
         This happens frequently in switch statements for default labels.
2260
         The structure is as follows:
2261
         FINAL_DEST_BB
2262
         ....
2263
         if (cond) jump A;
2264
         fall through
2265
         BB
2266
         jump with targets A, B, C, D...
2267
         A
2268
         has two incoming edges, from FINAL_DEST_BB and BB
2269
 
2270
         In this case, we can try to move the insns through BB and into
2271
         FINAL_DEST_BB.  */
2272
      if (EDGE_COUNT (other_bb->preds) != 1)
2273
        {
2274
          edge incoming_edge, incoming_bb_other_edge;
2275
          edge_iterator ei;
2276
 
2277
          if (final_dest_bb != NULL
2278
              || EDGE_COUNT (other_bb->preds) != 2)
2279
            return false;
2280
 
2281
          /* We must be able to move the insns across the whole block.  */
2282
          move_before = BB_HEAD (bb);
2283
          while (!NONDEBUG_INSN_P (move_before))
2284
            move_before = NEXT_INSN (move_before);
2285
 
2286
          if (EDGE_COUNT (bb->preds) != 1)
2287
            return false;
2288
          incoming_edge = EDGE_PRED (bb, 0);
2289
          final_dest_bb = incoming_edge->src;
2290
          if (EDGE_COUNT (final_dest_bb->succs) != 2)
2291
            return false;
2292
          FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2293
            if (incoming_bb_other_edge != incoming_edge)
2294
              break;
2295
          if (incoming_bb_other_edge->dest != other_bb)
2296
            return false;
2297
        }
2298
    }
2299
 
2300
  e0 = EDGE_SUCC (bb, 0);
2301
  e0_last_head = NULL_RTX;
2302
  changed = false;
2303
 
2304
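  /* Find the matching head sequence each successor shares with the first
     successor; MAX_MATCH is the length of the shortest such match and
     E0_LAST_HEAD its last insn in E0->DEST.  */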
  for (ix = 1; ix < nedges; ix++)
2305
    {
2306
      edge e = EDGE_SUCC (bb, ix);
2307
      rtx e0_last, e_last;
2308
      int nmatch;
2309
 
2310
      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2311
                                                 &e0_last, &e_last, 0);
2312
      if (nmatch == 0)
2313
        return false;
2314
 
2315
      if (nmatch < max_match)
2316
        {
2317
          max_match = nmatch;
2318
          e0_last_head = e0_last;
2319
        }
2320
    }
2321
 
2322
  /* If we matched an entire block, we probably have to avoid moving the
2323
     last insn.  */
2324
  if (max_match > 0
2325
      && e0_last_head == BB_END (e0->dest)
2326
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2327
          || control_flow_insn_p (e0_last_head)))
2328
    {
2329
      max_match--;
2330
      if (max_match == 0)
2331
        return false;
2332
      do
2333
        e0_last_head = prev_real_insn (e0_last_head);
2334
      while (DEBUG_INSN_P (e0_last_head));
2335
    }
2336
 
2337
  if (max_match == 0)
2338
    return false;
2339
 
2340
  /* We must find a union of the live registers at each of the end points.  */
2341
  live = BITMAP_ALLOC (NULL);
2342
  live_union = BITMAP_ALLOC (NULL);
2343
 
2344
  currptr = XNEWVEC (rtx, nedges);
2345
  headptr = XNEWVEC (rtx, nedges);
2346
  nextptr = XNEWVEC (rtx, nedges);
2347
 
2348
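  /* For each successor, remember the first real insn of the matched head
     (HEADPTR/CURRPTR) and accumulate into LIVE_UNION the registers that are
     live just after the matched sequence.  */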
  for (ix = 0; ix < nedges; ix++)
2349
    {
2350
      int j;
2351
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2352
      rtx head = BB_HEAD (merge_bb);
2353
 
2354
      while (!NONDEBUG_INSN_P (head))
2355
        head = NEXT_INSN (head);
2356
      headptr[ix] = head;
2357
      currptr[ix] = head;
2358
 
2359
      /* Compute the end point and live information.  */
2360
      for (j = 1; j < max_match; j++)
2361
        do
2362
          head = NEXT_INSN (head);
2363
        while (!NONDEBUG_INSN_P (head));
2364
      simulate_backwards_to_point (merge_bb, live, head);
2365
      IOR_REG_SET (live_union, live);
2366
    }
2367
 
2368
  /* If we're moving across two blocks, verify the validity of the
2369
     first move, then adjust the target and let the loop below deal
2370
     with the final move.  */
2371
  if (final_dest_bb != NULL)
2372
    {
2373
      rtx move_upto;
2374
 
2375
      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2376
                                       jump, e0->dest, live_union,
2377
                                       NULL, &move_upto);
2378
      if (!moveall)
2379
        {
2380
          if (move_upto == NULL_RTX)
2381
            goto out;
2382
 
2383
          while (e0_last_head != move_upto)
2384
            {
2385
              df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2386
                                              live_union);
2387
              e0_last_head = PREV_INSN (e0_last_head);
2388
            }
2389
        }
2390
      if (e0_last_head == NULL_RTX)
2391
        goto out;
2392
 
2393
      jump = BB_END (final_dest_bb);
2394
      cond = get_condition (jump, &move_before, true, false);
2395
      if (cond == NULL_RTX)
2396
        {
2397
#ifdef HAVE_cc0
2398
          if (reg_mentioned_p (cc0_rtx, jump))
2399
            move_before = prev_nonnote_nondebug_insn (jump);
2400
          else
2401
#endif
2402
            move_before = jump;
2403
        }
2404
    }
2405
 
2406
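  /* Move the matched head insns from E0->DEST up before MOVE_BEFORE, then
     delete the duplicate copies from the other successors; if only a prefix
     of the sequence could be moved, retry the remainder with the jump itself
     as the insertion point.  */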
  do
2407
    {
2408
      rtx move_upto;
2409
      moveall = can_move_insns_across (currptr[0], e0_last_head,
2410
                                       move_before, jump, e0->dest, live_union,
2411
                                       NULL, &move_upto);
2412
      if (!moveall && move_upto == NULL_RTX)
2413
        {
2414
          if (jump == move_before)
2415
            break;
2416
 
2417
          /* Try again, using a different insertion point.  */
2418
          move_before = jump;
2419
 
2420
#ifdef HAVE_cc0
2421
          /* Don't try moving before a cc0 user, as that may invalidate
2422
             the cc0.  */
2423
          if (reg_mentioned_p (cc0_rtx, jump))
2424
            break;
2425
#endif
2426
 
2427
          continue;
2428
        }
2429
 
2430
      if (final_dest_bb && !moveall)
2431
        /* We haven't checked whether a partial move would be OK for the first
2432
           move, so we have to fail this case.  */
2433
        break;
2434
 
2435
      changed = true;
2436
      for (;;)
2437
        {
2438
          if (currptr[0] == move_upto)
2439
            break;
2440
          for (ix = 0; ix < nedges; ix++)
2441
            {
2442
              rtx curr = currptr[ix];
2443
              do
2444
                curr = NEXT_INSN (curr);
2445
              while (!NONDEBUG_INSN_P (curr));
2446
              currptr[ix] = curr;
2447
            }
2448
        }
2449
 
2450
      /* If we can't currently move all of the identical insns, remember
2451
         each insn after the range that we'll merge.  */
2452
      if (!moveall)
2453
        for (ix = 0; ix < nedges; ix++)
2454
          {
2455
            rtx curr = currptr[ix];
2456
            do
2457
              curr = NEXT_INSN (curr);
2458
            while (!NONDEBUG_INSN_P (curr));
2459
            nextptr[ix] = curr;
2460
          }
2461
 
2462
      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2463
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2464
      if (final_dest_bb != NULL)
2465
        df_set_bb_dirty (final_dest_bb);
2466
      df_set_bb_dirty (bb);
2467
      for (ix = 1; ix < nedges; ix++)
2468
        {
2469
          df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2470
          delete_insn_chain (headptr[ix], currptr[ix], false);
2471
        }
2472
      if (!moveall)
2473
        {
2474
          if (jump == move_before)
2475
            break;
2476
 
2477
          /* For the unmerged insns, try a different insertion point.  */
2478
          move_before = jump;
2479
 
2480
#ifdef HAVE_cc0
2481
          /* Don't try moving before a cc0 user, as that may invalidate
2482
             the cc0.  */
2483
          if (reg_mentioned_p (cc0_rtx, jump))
2484
            break;
2485
#endif
2486
 
2487
          for (ix = 0; ix < nedges; ix++)
2488
            currptr[ix] = headptr[ix] = nextptr[ix];
2489
        }
2490
    }
2491
  while (!moveall);
2492
 
2493
 out:
2494
  free (currptr);
2495
  free (headptr);
2496
  free (nextptr);
2497
 
2498
  crossjumps_occured |= changed;
2499
 
2500
  return changed;
2501
}
2502
 
2503
/* Return true if BB contains just a bb note, or a bb note followed
2504
   only by DEBUG_INSNs.  */
2505
 
2506
static bool
2507
trivially_empty_bb_p (basic_block bb)
2508
{
2509
  rtx insn = BB_END (bb);
2510
 
2511
  while (1)
2512
    {
2513
      if (insn == BB_HEAD (bb))
2514
        return true;
2515
      if (!DEBUG_INSN_P (insn))
2516
        return false;
2517
      insn = PREV_INSN (insn);
2518
    }
2519
}
2520
 
2521
/* Do simple CFG optimizations - basic block merging, simplification of jump
2522
   instructions, etc.  Return nonzero if changes were made.  */
2523
 
2524
static bool
2525
try_optimize_cfg (int mode)
2526
{
2527
  bool changed_overall = false;
2528
  bool changed;
2529
  int iterations = 0;
2530
  basic_block bb, b, next;
2531
 
2532
  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2533
    clear_bb_flags ();
2534
 
2535
  crossjumps_occured = false;
2536
 
2537
  FOR_EACH_BB (bb)
2538
    update_forwarder_flag (bb);
2539
 
2540
  if (! targetm.cannot_modify_jumps_p ())
2541
    {
2542
      first_pass = true;
2543
      /* Attempt to merge blocks as made possible by edge removal.  If
2544
         a block has only one successor, and the successor has only
2545
         one predecessor, they may be combined.  */
2546
      do
2547
        {
2548
          block_was_dirty = false;
2549
          changed = false;
2550
          iterations++;
2551
 
2552
          if (dump_file)
2553
            fprintf (dump_file,
2554
                     "\n\ntry_optimize_cfg iteration %i\n\n",
2555
                     iterations);
2556
 
2557
          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
2558
            {
2559
              basic_block c;
2560
              edge s;
2561
              bool changed_here = false;
2562
 
2563
              /* Delete trivially dead basic blocks.  This is either
2564
                 blocks with no predecessors, or empty blocks with no
2565
                 successors.  However if the empty block with no
2566
                 successors is the successor of the ENTRY_BLOCK, it is
2567
                 kept.  This ensures that the ENTRY_BLOCK will have a
2568
                 successor which is a precondition for many RTL
2569
                 passes.  Empty blocks may result from expanding
2570
                 __builtin_unreachable ().  */
2571
              if (EDGE_COUNT (b->preds) == 0
2572
                  || (EDGE_COUNT (b->succs) == 0
2573
                      && trivially_empty_bb_p (b)
2574
                      && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
2575
                {
2576
                  c = b->prev_bb;
2577
                  if (EDGE_COUNT (b->preds) > 0)
2578
                    {
2579
                      edge e;
2580
                      edge_iterator ei;
2581
 
2582
                      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2583
                        {
2584
                          if (b->il.rtl->footer
2585
                              && BARRIER_P (b->il.rtl->footer))
2586
                            FOR_EACH_EDGE (e, ei, b->preds)
2587
                              if ((e->flags & EDGE_FALLTHRU)
2588
                                  && e->src->il.rtl->footer == NULL)
2589
                                {
2590
                                  if (b->il.rtl->footer)
2591
                                    {
2592
                                      e->src->il.rtl->footer = b->il.rtl->footer;
2593
                                      b->il.rtl->footer = NULL;
2594
                                    }
2595
                                  else
2596
                                    {
2597
                                      start_sequence ();
2598
                                      e->src->il.rtl->footer = emit_barrier ();
2599
                                      end_sequence ();
2600
                                    }
2601
                                }
2602
                        }
2603
                      else
2604
                        {
2605
                          rtx last = get_last_bb_insn (b);
2606
                          if (last && BARRIER_P (last))
2607
                            FOR_EACH_EDGE (e, ei, b->preds)
2608
                              if ((e->flags & EDGE_FALLTHRU))
2609
                                emit_barrier_after (BB_END (e->src));
2610
                        }
2611
                    }
2612
                  delete_basic_block (b);
2613
                  changed = true;
2614
                  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
2615
                  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
2616
                  continue;
2617
                }
2618
 
2619
              /* Remove code labels no longer used.  */
2620
              if (single_pred_p (b)
2621
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2622
                  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2623
                  && LABEL_P (BB_HEAD (b))
2624
                  /* If the previous block ends with a branch to this
2625
                     block, we can't delete the label.  Normally this
2626
                     is a condjump that is yet to be simplified, but
2627
                     if CASE_DROPS_THRU, this can be a tablejump with
2628
                     some element going to the same place as the
2629
                     default (fallthru).  */
2630
                  && (single_pred (b) == ENTRY_BLOCK_PTR
2631
                      || !JUMP_P (BB_END (single_pred (b)))
2632
                      || ! label_is_jump_target_p (BB_HEAD (b),
2633
                                                   BB_END (single_pred (b)))))
2634
                {
2635
                  rtx label = BB_HEAD (b);
2636
 
2637
                  delete_insn_chain (label, label, false);
2638
                  /* If the case label is undeletable, move it after the
2639
                     BASIC_BLOCK note.  */
2640
                  if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2641
                    {
2642
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));
2643
 
2644
                      reorder_insns_nobb (label, label, bb_note);
2645
                      BB_HEAD (b) = bb_note;
2646
                      if (BB_END (b) == bb_note)
2647
                        BB_END (b) = label;
2648
                    }
2649
                  if (dump_file)
2650
                    fprintf (dump_file, "Deleted label in block %i.\n",
2651
                             b->index);
2652
                }
2653
 
2654
              /* If we fall through an empty block, we can remove it.  */
2655
              if (!(mode & CLEANUP_CFGLAYOUT)
2656
                  && single_pred_p (b)
2657
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2658
                  && !LABEL_P (BB_HEAD (b))
2659
                  && FORWARDER_BLOCK_P (b)
2660
                  /* Note that forwarder_block_p true ensures that
2661
                     there is a successor for this block.  */
2662
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2663
                  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2664
                {
2665
                  if (dump_file)
2666
                    fprintf (dump_file,
2667
                             "Deleting fallthru block %i.\n",
2668
                             b->index);
2669
 
2670
                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2671
                  redirect_edge_succ_nodup (single_pred_edge (b),
2672
                                            single_succ (b));
2673
                  delete_basic_block (b);
2674
                  changed = true;
2675
                  b = c;
2676
                  continue;
2677
                }
2678
 
2679
              /* Merge B with its single successor, if any.  */
2680
              if (single_succ_p (b)
2681
                  && (s = single_succ_edge (b))
2682
                  && !(s->flags & EDGE_COMPLEX)
2683
                  && (c = s->dest) != EXIT_BLOCK_PTR
2684
                  && single_pred_p (c)
2685
                  && b != c)
2686
                {
2687
                  /* When not in cfg_layout mode, use code aware of reordering
2688
                     INSNs.  This code possibly creates new basic blocks, so it
2689
                     does not fit the merge_blocks interface and is kept here in
2690
                     the hope that it will become useless once more of the compiler
2691
                     is transformed to use cfg_layout mode.  */
2692
 
2693
                  if ((mode & CLEANUP_CFGLAYOUT)
2694
                      && can_merge_blocks_p (b, c))
2695
                    {
2696
                      merge_blocks (b, c);
2697
                      update_forwarder_flag (b);
2698
                      changed_here = true;
2699
                    }
2700
                  else if (!(mode & CLEANUP_CFGLAYOUT)
2701
                           /* If the jump insn has side effects,
2702
                              we can't kill the edge.  */
2703
                           && (!JUMP_P (BB_END (b))
2704
                               || (reload_completed
2705
                                   ? simplejump_p (BB_END (b))
2706
                                   : (onlyjump_p (BB_END (b))
2707
                                      && !tablejump_p (BB_END (b),
2708
                                                       NULL, NULL))))
2709
                           && (next = merge_blocks_move (s, b, c, mode)))
2710
                      {
2711
                        b = next;
2712
                        changed_here = true;
2713
                      }
2714
                }
2715
 
2716
              /* Simplify branch over branch.  */
2717
              if ((mode & CLEANUP_EXPENSIVE)
2718
                   && !(mode & CLEANUP_CFGLAYOUT)
2719
                   && try_simplify_condjump (b))
2720
                changed_here = true;
2721
 
2722
              /* If B has a single outgoing edge, but uses a
2723
                 non-trivial jump instruction without side-effects, we
2724
                 can either delete the jump entirely, or replace it
2725
                 with a simple unconditional jump.  */
2726
              if (single_succ_p (b)
2727
                  && single_succ (b) != EXIT_BLOCK_PTR
2728
                  && onlyjump_p (BB_END (b))
2729
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2730
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
2731
                                                     single_succ (b),
2732
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
2733
                {
2734
                  update_forwarder_flag (b);
2735
                  changed_here = true;
2736
                }
2737
 
2738
              /* Simplify branch to branch.  */
2739
              if (try_forward_edges (mode, b))
2740
                {
2741
                  update_forwarder_flag (b);
2742
                  changed_here = true;
2743
                }
2744
 
2745
              /* Look for shared code between blocks.  */
2746
              if ((mode & CLEANUP_CROSSJUMP)
2747
                  && try_crossjump_bb (mode, b))
2748
                changed_here = true;
2749
 
2750
              if ((mode & CLEANUP_CROSSJUMP)
2751
                  /* This can lengthen register lifetimes.  Do it only after
2752
                     reload.  */
2753
                  && reload_completed
2754
                  && try_head_merge_bb (b))
2755
                changed_here = true;
2756
 
2757
              /* Don't get confused by the index shift caused by
2758
                 deleting blocks.  */
2759
              if (!changed_here)
2760
                b = b->next_bb;
2761
              else
2762
                changed = true;
2763
            }
2764
 
2765
          if ((mode & CLEANUP_CROSSJUMP)
2766
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2767
            changed = true;
2768
 
2769
          if (block_was_dirty)
2770
            {
2771
              /* This should only be set by head-merging.  */
2772
              gcc_assert (mode & CLEANUP_CROSSJUMP);
2773
              df_analyze ();
2774
            }
2775
 
2776
#ifdef ENABLE_CHECKING
2777
          if (changed)
2778
            verify_flow_info ();
2779
#endif
2780
 
2781
          changed_overall |= changed;
2782
          first_pass = false;
2783
        }
2784
      while (changed);
2785
    }
2786
 
2787
  FOR_ALL_BB (b)
2788
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2789
 
2790
  return changed_overall;
2791
}
2792
 
2793
/* Delete all unreachable basic blocks.  */
2794
 
2795
bool
2796
delete_unreachable_blocks (void)
2797
{
2798
  bool changed = false;
2799
  basic_block b, prev_bb;
2800
 
2801
  find_unreachable_blocks ();
2802
 
2803
  /* When we're in GIMPLE mode and there may be debug insns, we should
2804
     delete blocks in reverse dominator order, so as to get a chance
2805
     to substitute all released DEFs into debug stmts.  If we don't
2806
     have dominators information, walking blocks backward gets us a
2807
     better chance of retaining most debug information than
2808
     otherwise.  */
2809
  if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2810
      && dom_info_available_p (CDI_DOMINATORS))
2811
    {
2812
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2813
        {
2814
          prev_bb = b->prev_bb;
2815
 
2816
          if (!(b->flags & BB_REACHABLE))
2817
            {
2818
              /* Speed up the removal of blocks that don't dominate
2819
                 others.  Walking backwards, this should be the common
2820
                 case.  */
2821
              if (!first_dom_son (CDI_DOMINATORS, b))
2822
                delete_basic_block (b);
2823
              else
2824
                {
2825
                  VEC (basic_block, heap) *h
2826
                    = get_all_dominated_blocks (CDI_DOMINATORS, b);
2827
 
2828
                  while (VEC_length (basic_block, h))
2829
                    {
2830
                      b = VEC_pop (basic_block, h);
2831
 
2832
                      prev_bb = b->prev_bb;
2833
 
2834
                      gcc_assert (!(b->flags & BB_REACHABLE));
2835
 
2836
                      delete_basic_block (b);
2837
                    }
2838
 
2839
                  VEC_free (basic_block, heap, h);
2840
                }
2841
 
2842
              changed = true;
2843
            }
2844
        }
2845
    }
2846
  else
2847
    {
2848
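      /* Without dominator information, simply walk the blocks backwards and
         delete every unreachable one.  */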
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2849
        {
2850
          prev_bb = b->prev_bb;
2851
 
2852
          if (!(b->flags & BB_REACHABLE))
2853
            {
2854
              delete_basic_block (b);
2855
              changed = true;
2856
            }
2857
        }
2858
    }
2859
 
2860
  if (changed)
2861
    tidy_fallthru_edges ();
2862
  return changed;
2863
}
2864
 
2865
/* Delete any jump tables never referenced.  We can't delete them at the
2866
   time of removing the tablejump insn, as they are referenced by the preceding
2867
   insns computing the destination, so we delay their deletion and garbage collect
2868
   them once life information is computed.  */
2869
void
2870
delete_dead_jumptables (void)
2871
{
2872
  basic_block bb;
2873
 
2874
  /* A dead jump table does not belong to any basic block.  Scan insns
2875
     between two adjacent basic blocks.  */
2876
  FOR_EACH_BB (bb)
2877
    {
2878
      rtx insn, next;
2879
 
2880
      for (insn = NEXT_INSN (BB_END (bb));
2881
           insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2882
           insn = next)
2883
        {
2884
          next = NEXT_INSN (insn);
2885
          if (LABEL_P (insn)
2886
              && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2887
              && JUMP_TABLE_DATA_P (next))
2888
            {
2889
              rtx label = insn, jump = next;
2890
 
2891
              if (dump_file)
2892
                fprintf (dump_file, "Dead jumptable %i removed\n",
2893
                         INSN_UID (insn));
2894
 
2895
              next = NEXT_INSN (next);
2896
              delete_insn (jump);
2897
              delete_insn (label);
2898
            }
2899
        }
2900
    }
2901
}
2902
 
2903
 
2904
/* Tidy the CFG by deleting unreachable code and whatnot.  */
2905
 
2906
bool
2907
cleanup_cfg (int mode)
2908
{
2909
  bool changed = false;
2910
 
2911
  /* Set the cfglayout mode flag here.  We could update all the callers
2912
     but that is just inconvenient, especially given that we eventually
2913
     want to have cfglayout mode as the default.  */
2914
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
2915
    mode |= CLEANUP_CFGLAYOUT;
2916
 
2917
  timevar_push (TV_CLEANUP_CFG);
2918
  if (delete_unreachable_blocks ())
2919
    {
2920
      changed = true;
2921
      /* We've possibly created trivially dead code.  Clean it up right
2922
         now to introduce more opportunities for try_optimize_cfg.  */
2923
      if (!(mode & (CLEANUP_NO_INSN_DEL))
2924
          && !reload_completed)
2925
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
2926
    }
2927
 
2928
  compact_blocks ();
2929
 
2930
  /* To tail-merge blocks ending in the same noreturn function (e.g.
2931
     a call to abort) we have to insert fake edges to exit.  Do this
2932
     here once.  The fake edges do not interfere with any other CFG
2933
     cleanups.  */
2934
  if (mode & CLEANUP_CROSSJUMP)
2935
    add_noreturn_fake_exit_edges ();
2936
 
2937
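  /* The cfg_cleanup debug counter lets this pass be bisected; once it runs
     out, return with only the work done so far.  */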
  if (!dbg_cnt (cfg_cleanup))
2938
    return changed;
2939
 
2940
  while (try_optimize_cfg (mode))
2941
    {
2942
      delete_unreachable_blocks (), changed = true;
2943
      if (!(mode & CLEANUP_NO_INSN_DEL))
2944
        {
2945
          /* Try to remove some trivially dead insns when doing an expensive
2946
             cleanup.  But delete_trivially_dead_insns doesn't work after
2947
             reload (it only handles pseudos) and run_fast_dce is too costly
2948
             to run in every iteration.
2949
 
2950
             For effective cross jumping, we really want to run a fast DCE to
2951
             clean up any dead conditions, or they get in the way of performing
2952
             useful tail merges.
2953
 
2954
             Other transformations in cleanup_cfg are not so sensitive to dead
2955
             code, so delete_trivially_dead_insns or even doing nothing at all
2956
             is good enough.  */
2957
          if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2958
              && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2959
            break;
2960
          if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
2961
            run_fast_dce ();
2962
        }
2963
      else
2964
        break;
2965
    }
2966
 
2967
  if (mode & CLEANUP_CROSSJUMP)
2968
    remove_fake_exit_edges ();
2969
 
2970
  /* Don't call delete_dead_jumptables in cfglayout mode, because
2971
     that function assumes that jump tables are in the insns stream.
2972
     But we also don't _have_ to delete dead jumptables in cfglayout
2973
     mode because we shouldn't even be looking at things that are
2974
     not in a basic block.  Dead jumptables are cleaned up when
2975
     going out of cfglayout mode.  */
2976
  if (!(mode & CLEANUP_CFGLAYOUT))
2977
    delete_dead_jumptables ();
2978
 
2979
  timevar_pop (TV_CLEANUP_CFG);
2980
 
2981
  return changed;
2982
}
2983
 
2984
static unsigned int
2985
rest_of_handle_jump (void)
2986
{
2987
  if (crtl->tail_call_emit)
2988
    fixup_tail_calls ();
2989
  return 0;
2990
}
2991
 
2992
struct rtl_opt_pass pass_jump =
2993
{
2994
 {
2995
  RTL_PASS,
2996
  "sibling",                            /* name */
2997
  NULL,                                 /* gate */
2998
  rest_of_handle_jump,                  /* execute */
2999
  NULL,                                 /* sub */
3000
  NULL,                                 /* next */
3001
  0,                                    /* static_pass_number */
3002
  TV_JUMP,                              /* tv_id */
3003
  0,                                    /* properties_required */
3004
  0,                                    /* properties_provided */
3005
  0,                                    /* properties_destroyed */
3006
  TODO_ggc_collect,                     /* todo_flags_start */
3007
  TODO_verify_flow,                     /* todo_flags_finish */
3008
 }
3009
};
3010
 
3011
 
3012
static unsigned int
3013
rest_of_handle_jump2 (void)
3014
{
3015
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
3016
  if (dump_file)
3017
    dump_flow_info (dump_file, dump_flags);
3018
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3019
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3020
  return 0;
3021
}
3022
 
3023
 
3024
struct rtl_opt_pass pass_jump2 =
3025
{
3026
 {
3027
  RTL_PASS,
3028
  "jump",                               /* name */
3029
  NULL,                                 /* gate */
3030
  rest_of_handle_jump2,                 /* execute */
3031
  NULL,                                 /* sub */
3032
  NULL,                                 /* next */
3033
  0,                                    /* static_pass_number */
3034
  TV_JUMP,                              /* tv_id */
3035
  0,                                    /* properties_required */
3036
  0,                                    /* properties_provided */
3037
  0,                                    /* properties_destroyed */
3038
  TODO_ggc_collect,                     /* todo_flags_start */
3039
  TODO_verify_rtl_sharing,              /* todo_flags_finish */
3040
 }
3041
};
