/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"

/* The number of insns to be scheduled in total.  */
static int target_n_insns;
/* The number of insns scheduled so far.  */
static int sched_n_insns;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (struct ready_list *);
static int can_schedule_ready_p (rtx);
static int new_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx,
                                               rtx);
static void add_missing_bbs (rtx, basic_block, basic_block);

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_n_insns < target_n_insns;
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (struct ready_list *ready)
{
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  target_n_insns = 0;
  sched_n_insns = 0;

#if 0
  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_dependencies ();
#endif

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (INSN_DEP_COUNT (insn) == 0)
        ready_add (ready, insn);
      target_n_insns++;
    }
}

/* Called after taking INSN from the ready list.  Returns nonzero if this
   insn can be scheduled, zero if we should silently discard it.  */

static int
can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED)
{
  sched_n_insns++;
  return 1;
}

/* Called after INSN has all its dependencies resolved.  Return nonzero
   if it should be moved to the ready list or the queue, or zero if we
   should silently discard it.  */
static int
new_ready (rtx next ATTRIBUTE_UNUSED)
{
  return 1;
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  sprintf (tmp, "%4d", INSN_UID (insn));
  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);
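
  /* Prefer the insn belonging to the hotter block, judged by profile
     count or by the estimated execution frequency.  */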
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                         rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                               regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         in the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         conditionally overwritten that of the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, e->dest->il.rtl->global_live_at_start, cond_set);
    else
      bitmap_ior_into (used, e->dest->il.rtl->global_live_at_start);
}

/* Used in schedule_ebbs to initialize current_sched_info for scheduling
   extended basic blocks (or single basic blocks).  */

static struct sched_info ebb_sched_info =
{
  init_ready_list,
  can_schedule_ready_p,
  schedule_more_p,
  new_ready,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,
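
  /* The NULL fields are the insn window boundaries (prev_head, next_tail,
     head, tail), filled in at run time by schedule_ebb and schedule_block;
     the trailing integers initialize the flag fields declared for
     struct sched_info in sched-int.h.  */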
  NULL, NULL,
  NULL, NULL,
  0, 1, 0
};

/* It is possible that ebb scheduling eliminated some blocks.
   Place blocks from FIRST to LAST before BEFORE.  */

static void
add_missing_bbs (rtx before, basic_block first, basic_block last)
{
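  /* Walk backward from LAST to FIRST, re-creating each eliminated block
     as an empty block consisting only of its basic-block note.  */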
  for (; last != first->prev_bb; last = last->prev_bb)
    {
      before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before);
      NOTE_BASIC_BLOCK (before) = last;
      BB_HEAD (last) = before;
      BB_END (last) = before;
      update_bb_for_insn (last);
    }
}

/* Fixup the CFG after EBB scheduling.  Re-recognize the basic
   block boundaries between HEAD and TAIL and update the basic block
   structures between BB and LAST.  */

static basic_block
fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head,
                            rtx tail)
{
  rtx insn = head;
  rtx last_inside = BB_HEAD (bb);
  rtx aftertail = NEXT_INSN (tail);

  head = BB_HEAD (bb);
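
  /* Walk every insn from HEAD to TAIL.  last_inside tracks the most
     recent insn known to lie inside the current block; a control flow
     insn (or reaching TAIL) closes the block being rebuilt.  */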
  for (; insn != aftertail; insn = NEXT_INSN (insn))
    {
      gcc_assert (!LABEL_P (insn));
      /* Create new basic blocks just before the first insn.  */
      if (inside_basic_block_p (insn))
        {
          if (!last_inside)
            {
              rtx note;

              /* Re-emit the basic block note for the newly found BB header.  */
              if (LABEL_P (insn))
                {
                  note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn);
                  head = insn;
                  last_inside = note;
                }
              else
                {
                  note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn);
                  head = note;
                  last_inside = insn;
                }
            }
          else
            last_inside = insn;
        }
      /* Control flow instructions terminate basic blocks.  It is possible
         that we've eliminated some basic blocks (made them empty).
         Find the proper basic block using BLOCK_FOR_INSN and arrange things in
         a sensible way by inserting empty basic blocks as needed.  */
      if (control_flow_insn_p (insn) || (insn == tail && last_inside))
        {
          basic_block curr_bb = BLOCK_FOR_INSN (insn);
          rtx note;

          if (!control_flow_insn_p (insn))
            curr_bb = last;
          if (bb == last->next_bb)
            {
              edge f;
              rtx h;
              edge_iterator ei;

              /* An obscure special case, where we do have a partially dead
                 instruction scheduled after the last control flow instruction.
                 In this case we can create a new basic block.  There is
                 always exactly one such basic block, last in the sequence.
                 Handle it by splitting the edge and repositioning the block.
                 This is somewhat hackish, but at least avoids cut&paste.

                 A safer solution would be to bring the code into sequence,
                 do the split and re-emit it back, in case this ever
                 triggers a problem.  */

              FOR_EACH_EDGE (f, ei, bb->prev_bb->succs)
                if (f->flags & EDGE_FALLTHRU)
                  break;

              if (f)
                {
                  last = curr_bb = split_edge (f);
                  h = BB_HEAD (curr_bb);
                  BB_HEAD (curr_bb) = head;
                  BB_END (curr_bb) = insn;
                  /* Edge splitting created a misplaced BASIC_BLOCK note; kill
                     it.  */
                  delete_insn (h);
                }
              /* It may happen that code got moved past an unconditional jump
                 in case the code is completely dead.  Kill it.  */
              else
                {
                  rtx next = next_nonnote_insn (insn);
                  delete_insn_chain (head, insn);
                  /* We keep some notes in the way that may split the barrier
                     from the jump.  */
                  if (BARRIER_P (next))
                     {
                       emit_barrier_after (prev_nonnote_insn (head));
                       delete_insn (next);
                     }
                  insn = NULL;
                }
            }
          else
            {
              BB_HEAD (curr_bb) = head;
              BB_END (curr_bb) = insn;
              add_missing_bbs (BB_HEAD (curr_bb), bb, curr_bb->prev_bb);
            }
          note = LABEL_P (head) ? NEXT_INSN (head) : head;
          NOTE_BASIC_BLOCK (note) = curr_bb;
          update_bb_for_insn (curr_bb);
          bb = curr_bb->next_bb;
          last_inside = NULL;
          if (!insn)
             break;
        }
    }
  add_missing_bbs (BB_HEAD (last->next_bb), bb, last);
  return bb->prev_bb;
}

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already-processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  rtx back_link;
  basic_block bb, earliest_block = NULL;

  for (back_link = LOG_LINKS (load_insn);
       back_link;
       back_link = XEXP (back_link, 1))
    {
      rtx insn1 = XEXP (back_link, 0);
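
      /* A LOG_LINKS entry whose mode is VOIDmode records a true (def-use)
         dependence; other modes encode anti and output dependencies.  */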
      if (GET_MODE (back_link) == VOIDmode)
        {
          /* Found a DEF-USE dependence (insn1, load_insn).  */
          rtx fore_link;

          for (fore_link = INSN_DEPEND (insn1);
               fore_link;
               fore_link = XEXP (fore_link, 1))
            {
              rtx insn2 = XEXP (fore_link, 0);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (GET_MODE (fore_link) == VOIDmode)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 is not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given EBB.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int class;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (JUMP_P (insn))
      {
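        /* Chain this jump's block onto the list of blocks already seen,
           linked through bb->aux; earliest_block_with_similiar_load
           walks this list.  */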
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        class = haifa_classify_insn (insn);
        prev = last_jump;
        switch (class)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similiar_load (last_block, insn);
                if (bb)
                  {
                    bb = bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs,
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev)
                && add_dependence (insn, prev, REG_DEP_ANTI))
              add_forward_dependence (prev, insn, REG_DEP_ANTI);
            break;

          default:
            break;
          }
      }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  int n_insns;
  basic_block b;
  struct deps tmp_deps;
  basic_block first_bb = BLOCK_FOR_INSN (head);
  basic_block last_bb = BLOCK_FOR_INSN (tail);
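
  /* If the region contains no real insns, there is nothing to schedule.  */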
  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  init_deps_global ();

  /* Compute LOG_LINKS.  */
  init_deps (&tmp_deps);
  sched_analyze (&tmp_deps, head, tail);
  free_deps (&tmp_deps);

  /* Compute INSN_DEPEND.  */
  compute_forward_dependences (head, tail);

  add_deps_for_risky_insns (head, tail);

  if (targetm.sched.dependencies_evaluation_hook)
    targetm.sched.dependencies_evaluation_hook (head, tail);

  /* Set priorities.  */
  n_insns = set_priorities (head, tail);
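  /* set_priorities returned the number of real insns in the region;
     schedule_block and the sanity check below rely on this count.  */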

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  if (write_symbols != NO_DEBUG)
    {
      save_line_notes (first_bb->index, head, tail);
      rm_line_notes (head, tail);
    }

  /* rm_other_notes only removes notes which are _inside_ the
     block---that is, it won't remove notes before the first real insn
     or after the last real insn of the block.  So if the first insn
     has a REG_SAVE_NOTE which would otherwise be emitted before the
     insn, it is redundant with the note before the start of the
     block, and so we have to take it out.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
          remove_note (head, note);
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  rm_other_notes (head, tail);

  current_sched_info->queue_must_finish_empty = 1;

  schedule_block (-1, n_insns);

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_n_insns == n_insns);
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (write_symbols != NO_DEBUG)
    restore_line_notes (head, tail);
  b = fix_basic_block_boundaries (first_bb, last_bb, head, tail);

  finish_deps_global ();
  return b;
}

/* The one entry point in this file.  DUMP_FILE is the dump file for
   this pass.  */

void
schedule_ebbs (FILE *dump_file)
{
  basic_block bb;
  int probability_cutoff;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
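  /* The parameter value is a percentage; rescale it to the
     REG_BR_PROB_BASE fixed-point scale used by edge probabilities.  */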
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  sched_init (dump_file);

  current_sched_info = &ebb_sched_info;

  compute_bb_for_insn ();

  /* Schedule every extended basic block in the function.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);
      rtx tail;
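
      /* Grow the extended basic block: keep following the fallthrough
         edge while the successor block has no label (so it cannot be
         entered from elsewhere) and the edge is taken often enough.  */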
      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* Skip over leading notes and labels and trailing notes; the rest
         of the code should be fixed not to get confused by a note or
         two.  */
      while (head != tail)
        {
          if (NOTE_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }

  /* Updating life info can be done by local propagation over the modified
     superblocks.  */

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  sched_finish ();
}
