openrisc/trunk/gnu-src/gcc-4.2.2/gcc/sched-ebb.c (rev 199)

/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"

/* The number of insns scheduled so far.  */
static int sched_n_insns;

/* The number of insns to be scheduled in total.  */
static int n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;
/* Set of basic blocks that are ebb heads or tails respectively.  */
static bitmap_head ebb_head, ebb_tail;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);

static void add_remove_insn (rtx, int);
static void add_block1 (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void fix_recovery_cfg (int, int, int);

#ifdef ENABLE_CHECKING
static int ebb_head_or_leaf_p (basic_block, int);
#endif

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_n_insns < n_insns;
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_n_insns = 0;

#if 0
  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_dependencies ();
#endif

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
         instruction scheduled after the last control flow instruction.
         In this case we can create a new basic block.  It is
         always exactly one basic block, last in the sequence.  */

      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          break;

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !IS_SPECULATION_CHECK_P (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        /* Create an empty unreachable block after the INSN.  */
        bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      add_block (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  sprintf (tmp, "%4d", INSN_UID (insn));
  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

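  /* Prefer the insn from the block with the greater profile count or
     frequency; otherwise treat the two insns as equally good.  */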
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                         rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                               regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

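  /* GLAT_START[i] is the set of registers recorded as live at the start
     of block i; it is kept because USE_GLAT is set in the flags of
     ebb_sched_info below.  */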
  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         in the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         conditionally overwritten that of the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, glat_start [e->dest->index], cond_set);
    else
      bitmap_ior_into (used, glat_start [e->dest->index]);
}

/* Used in schedule_ebbs to initialize current_sched_info for scheduling
   extended basic blocks (or single basic blocks).  */

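/* The NULL entries below are sched_info callbacks that ebb scheduling does
   not use; see struct sched_info in sched-int.h for the field order.  */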
static struct sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,

  NULL, NULL,
  NULL, NULL,
  0, 1, 0,

  add_remove_insn,
  begin_schedule_ready,
  add_block1,
  advance_target_bb,
  fix_recovery_cfg,
#ifdef ENABLE_CHECKING
  ebb_head_or_leaf_p,
#endif
  /* We need DETACH_LIFE_INFO to be able to create new basic blocks.
     See begin_schedule_ready ().  */
  SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already-processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */

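/* For example (hypothetical RTL, purely illustrative): if insn1 sets r1,
   load2 in an earlier block is "r2 = mem[r1+4]" and load_insn is
   "r3 = mem[r1+8]", then both addresses are r1 plus a constant, so any
   fault that hoisting load_insn could raise would already have been
   raised by load2.  */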
static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  rtx back_link;
  basic_block bb, earliest_block = NULL;

  for (back_link = LOG_LINKS (load_insn);
       back_link;
       back_link = XEXP (back_link, 1))
    {
      rtx insn1 = XEXP (back_link, 0);

      if (GET_MODE (back_link) == VOIDmode)
        {
          /* Found a DEF-USE dependence (insn1, load_insn).  */
          rtx fore_link;

          for (fore_link = INSN_DEPEND (insn1);
               fore_link;
               fore_link = XEXP (fore_link, 1))
            {
              rtx insn2 = XEXP (fore_link, 0);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (GET_MODE (fore_link) == VOIDmode)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int class;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

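  /* Walk the ebb.  Each control flow insn seen becomes the new LAST_JUMP,
     and the blocks visited so far are chained through bb->aux (most recent
     first) for earliest_block_with_similiar_load.  Any later risky insn
     gets an anti dependence on the preceding jump (or, for PFREE
     candidates, on the end of the earliest block with a similar load), so
     it is not moved above it, except speculatively when DO_SPECULATION is
     enabled.  */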
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        class = haifa_classify_insn (insn);
        prev = last_jump;
        switch (class)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similiar_load (last_block, insn);
                if (bb)
                  {
                    bb = bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                if (!(current_sched_info->flags & DO_SPECULATION))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = add_or_update_back_dep (insn, prev,
                                                  REG_DEP_ANTI, DEP_ANTI);

                    if (res == DEP_CREATED)
                      add_forw_dep (insn, LOG_LINKS (insn));
                    else
                      gcc_assert (res != DEP_CHANGED);
                  }
                else
                  add_or_update_back_forw_dep (insn, prev, REG_DEP_ANTI,
                                               set_dep_weak (DEP_ANTI,
                                                             BEGIN_CONTROL,
                                                             MAX_DEP_WEAK));
              }

            break;

          default:
            break;
          }
      }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute LOG_LINKS.  */
      init_deps (&tmp_deps);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      /* Compute INSN_DEPEND.  */
      compute_forward_dependences (head, tail);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  if (write_symbols != NO_DEBUG)
    {
      save_line_notes (first_bb->index, head, tail);
      rm_line_notes (head, tail);
    }

  /* rm_other_notes only removes notes which are _inside_ the
     block---that is, it won't remove notes before the first real insn
     or after the last real insn of the block.  So if the first insn
     has a REG_SAVE_NOTE which would otherwise be emitted before the
     insn, it is redundant with the note before the start of the
     block, and so we have to take it out.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
          remove_note (head, note);
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  rm_other_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  current_sched_info->queue_must_finish_empty = 1;

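  /* Run the haifa scheduler on the ebb.  schedule_block may grow the
     region (see begin_schedule_ready), in which case last_bb is updated
     through the add_block1 hook.  */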
  target_bb = first_bb;
  schedule_block (&target_bb, n_insns);

  /* We might pack all instructions into fewer blocks, so we may have made
     some of them empty.  Can't assert (target_bb == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_n_insns == n_insns);
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (write_symbols != NO_DEBUG)
    restore_line_notes (head, tail);

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}

/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;
  sbitmap large_region_blocks, blocks;
  int any_large_regions;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* We need current_sched_info in init_dependency_caches, which is
     invoked via sched_init.  */
  current_sched_info = &ebb_sched_info;

  sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and the EBB_HEAD and EBB_TAIL markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
  bitmap_initialize (&ebb_head, 0);
  bitmap_clear (&ebb_head);
  bitmap_initialize (&ebb_tail, 0);
  bitmap_clear (&ebb_tail);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

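      /* Grow the ebb by following the fall-through edge for as long as one
         exists, it is likely enough (probability above the cutoff) and the
         next block is not a join point (it has no label).  */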
      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* Blah.  We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (NOTE_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bitmap_set_bit (&ebb_head, BLOCK_NUM (head));
      bb = schedule_ebb (head, tail);
      bitmap_set_bit (&ebb_tail, bb->index);
    }
  bitmap_clear (&dont_calc_deps);

  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
  /* We can create new basic blocks during scheduling, and
     attach_life_info () will create regsets for them
     (along with attaching existing info back).  */
  attach_life_info ();

  /* Update register live information.  */
  allocate_reg_life_data ();

  any_large_regions = 0;
  large_region_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (large_region_blocks);
  FOR_EACH_BB (bb)
    SET_BIT (large_region_blocks, bb->index);

  blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (blocks);

  /* Update life information.  For regions consisting of multiple blocks
     we've possibly done interblock scheduling that affects global liveness.
     For regions consisting of single blocks we need to do only local
     liveness.  */
  FOR_EACH_BB (bb)
    {
      int bbi;

      bbi = bb->index;

      if (!bitmap_bit_p (&ebb_head, bbi)
          || !bitmap_bit_p (&ebb_tail, bbi)
          /* New blocks (e.g. recovery blocks) should be processed
             as parts of large regions.  */
          || !glat_start[bbi])
        any_large_regions = 1;
      else
        {
          SET_BIT (blocks, bbi);
          RESET_BIT (large_region_blocks, bbi);
        }
    }

  update_life_info (blocks, UPDATE_LIFE_LOCAL, 0);
  sbitmap_free (blocks);

  if (any_large_regions)
    {
      update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0);

#ifdef ENABLE_CHECKING
      /* !!! We can't check reg_live_info here because destination
         registers of COND_EXEC's may be dead before scheduling (while
         they should be alive).  Don't know why.  */
      /*check_reg_live (true);*/
#endif
    }
  sbitmap_free (large_region_blocks);

  bitmap_clear (&ebb_head);
  bitmap_clear (&ebb_tail);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  sched_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    n_insns++;
  else
    n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
add_block1 (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs, therefore they
     always form a single-block EBB, and therefore we can use bb->index
     to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next non-empty block.  */
    {
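      /* A block whose last insn is still its basic-block note contains no
         real insns, so keep advancing.  */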
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#ifdef ENABLE_CHECKING
/* Return nonzero if BB is the first or last (depending on LEAF_P) block in
   the current ebb.  For more information please refer to
   sched-int.h: struct sched_info: region_head_or_leaf_p.  */
static int
ebb_head_or_leaf_p (basic_block bb, int leaf_p)
{
  if (!leaf_p)
    return bitmap_bit_p (&ebb_head, bb->index);
  else
    return bitmap_bit_p (&ebb_tail, bb->index);
}
#endif /* ENABLE_CHECKING  */
