/* mode-switching.c from openrisc/trunk/gnu-old/gcc-4.2.2/gcc
   (OpenCores Subversion repository, blame for rev 816).  */

/* CPU mode switching
   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "tm_p.h"
#include "function.h"
#include "tree-pass.h"
#include "timevar.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode.  (Each basic block
   in the flow graph must have at least one entry in the segment table.)

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, till for each entity all modes are exhausted.

   More details are located in the code for optimize_mode_switching().  */

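/* Editor's note: an illustrative sketch of the target-side interface this
   pass consumes.  The macro names below are the real GCC target macros
   used later in this file; the entity/mode numbering and the two
   hypothetical_* functions are invented here for illustration and are not
   part of this file or of any port.  Guarded with #if 0 so it is never
   compiled.  */
#if 0
/* One entity with three real modes; by convention the number of modes
   (here 3) doubles as the "no particular mode needed" value.  */
#define NUM_MODES_FOR_MODE_SWITCHING { 3 }

/* Nonzero if it is worth running this pass for the given entity.  */
#define OPTIMIZE_MODE_SWITCHING(ENTITY) 1

/* The mode INSN requires ENTITY to be in, or the number of modes if it
   has no requirement.  */
#define MODE_NEEDED(ENTITY, INSN) hypothetical_mode_needed (INSN)

/* The order in which modes are processed: priority 0 first.  */
#define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)

/* Emit RTL to switch ENTITY to MODE; HARD_REGS_LIVE is the set of hard
   registers live at the point of the switch.  */
#define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
  hypothetical_emit_mode_set (MODE, HARD_REGS_LIVE)
#endif
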
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int mode;
  rtx insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
};

/* These bitmaps are used for the LCM algorithm.  */

static sbitmap *antic;
static sbitmap *transp;
static sbitmap *comp;

static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET);
static void reg_becomes_live (rtx, rtx, void *);
static void make_preds_opaque (basic_block, int);


/* Allocate a new seginfo structure, initialized with the MODE, INSN,
   and basic block BB parameters.  */

static struct seginfo *
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
        ptr = ptr->next;
      ptr->next = info;
    }
}

/* Make all predecessors of basic block B opaque, recursively, till we hit
   some that are already non-transparent, or an edge where aux is set; that
   denotes that a mode set is to be done on that edge.
   J is the bit number in the bitmaps that corresponds to the entity that
   we are currently handling mode-switching for.  */

static void
make_preds_opaque (basic_block b, int j)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->preds)
    {
      basic_block pb = e->src;

      if (e->aux || ! TEST_BIT (transp[pb->index], j))
        continue;

      RESET_BIT (transp[pb->index], j);
      make_preds_opaque (pb, j);
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET live)
{
  int regno, nregs;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
         nregs--)
      CLEAR_HARD_REG_BIT (live, regno + nregs);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno, nregs;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
         nregs--)
      SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
}

/* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined,
   and vice versa.  */
#if defined (MODE_ENTRY) != defined (MODE_EXIT)
 #error "Both MODE_ENTRY and MODE_EXIT must be defined"
#endif

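/* Editor's note: MODE_ENTRY and MODE_EXIT are target macros giving the
   mode an entity is known to have on function entry and must have on
   function exit.  A minimal hypothetical definition, shown only for
   illustration and guarded with #if 0, could be:  */
#if 0
#define MODE_ENTRY(ENTITY) 0   /* Every function starts in mode 0.  */
#define MODE_EXIT(ENTITY) 0    /* And must return in mode 0.  */
#endif
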
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
        basic_block src_bb = eg->src;
        regset live_at_end = src_bb->il.rtl->global_live_at_end;
        rtx last_insn, ret_reg;

        gcc_assert (!pre_exit);
        /* If this function returns a value at the end, we have to
           insert the final mode switch before the return value copy
           to its hard register.  */
        if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
            && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
            && GET_CODE (PATTERN (last_insn)) == USE
            && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
          {
            int ret_start = REGNO (ret_reg);
            int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
            int ret_end = ret_start + nregs;
            int short_block = 0;
            int maybe_builtin_apply = 0;
            int forced_late_switch = 0;
            rtx before_return_copy;

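            /* Walk backwards from the USE of the return register at the
               end of the block, looking for the copies that set its hard
               registers; NREGS counts the return-value registers that
               still lack a visible copy.  */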
            do
              {
                rtx return_copy = PREV_INSN (last_insn);
                rtx return_copy_pat, copy_reg;
                int copy_start, copy_num;
                int j;

                if (INSN_P (return_copy))
                  {
                    if (GET_CODE (PATTERN (return_copy)) == USE
                        && GET_CODE (XEXP (PATTERN (return_copy), 0)) == REG
                        && (FUNCTION_VALUE_REGNO_P
                            (REGNO (XEXP (PATTERN (return_copy), 0)))))
                      {
                        maybe_builtin_apply = 1;
                        last_insn = return_copy;
                        continue;
                      }
                    /* If the return register is not (in its entirety)
                       likely spilled, the return copy might be
                       partially or completely optimized away.  */
                    return_copy_pat = single_set (return_copy);
                    if (!return_copy_pat)
                      {
                        return_copy_pat = PATTERN (return_copy);
                        if (GET_CODE (return_copy_pat) != CLOBBER)
                          break;
                      }
                    copy_reg = SET_DEST (return_copy_pat);
                    if (GET_CODE (copy_reg) == REG)
                      copy_start = REGNO (copy_reg);
                    else if (GET_CODE (copy_reg) == SUBREG
                             && GET_CODE (SUBREG_REG (copy_reg)) == REG)
                      copy_start = REGNO (SUBREG_REG (copy_reg));
                    else
                      break;
                    if (copy_start >= FIRST_PSEUDO_REGISTER)
                      break;
                    copy_num
                      = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

                    /* If the return register is not likely spilled, as is
                       the case for floating point on SH4, then it might
                       be set by an arithmetic operation that needs a
                       different mode than the exit block.  */
                    for (j = n_entities - 1; j >= 0; j--)
                      {
                        int e = entity_map[j];
                        int mode = MODE_NEEDED (e, return_copy);

                        if (mode != num_modes[e] && mode != MODE_EXIT (e))
                          break;
                      }
                    if (j >= 0)
                      {
                        /* For the SH4, floating point loads depend on fpscr,
                           thus we might need to put the final mode switch
                           after the return value copy.  That is still OK,
                           because a floating point return value does not
                           conflict with address reloads.  */
                        if (copy_start >= ret_start
                            && copy_start + copy_num <= ret_end
                            && OBJECT_P (SET_SRC (return_copy_pat)))
                          forced_late_switch = 1;
                        break;
                      }

                    if (copy_start >= ret_start
                        && copy_start + copy_num <= ret_end)
                      nregs -= copy_num;
                    else if (!maybe_builtin_apply
                             || !FUNCTION_VALUE_REGNO_P (copy_start))
                      break;
                    last_insn = return_copy;
                  }
                /* ??? Exception handling can lead to the return value
                   copy being already separated from the return value use,
                   as in unwind-dw2.c.
                   Similarly, conditionally returning without a value,
                   and conditionally using builtin_return can lead to an
                   isolated use.  */
                if (return_copy == BB_HEAD (src_bb))
                  {
                    short_block = 1;
                    break;
                  }
                last_insn = return_copy;
              }
            while (nregs);

            /* If we didn't see a full return value copy, verify that there
               is a plausible reason for this.  If some, but not all of the
               return register is likely spilled, we can expect that there
               is a copy for the likely spilled part.  */
            gcc_assert (!nregs
                        || forced_late_switch
                        || short_block
                        || !(CLASS_LIKELY_SPILLED_P
                             (REGNO_REG_CLASS (ret_start)))
                        || (nregs
                            != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
                        /* For multi-hard-register floating point
                           values, sometimes the likely-spilled part
                           is ordinarily copied first, then the other
                           part is set with an arithmetic operation.
                           This doesn't actually cause reload
                           failures, so let it pass.  */
                        || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
                            && nregs != 1));

            if (INSN_P (last_insn))
              {
                before_return_copy
                  = emit_note_before (NOTE_INSN_DELETED, last_insn);
                /* Instructions preceding LAST_INSN in the same block might
                   require a different mode than MODE_EXIT, so if we might
                   have such instructions, keep them in a separate block
                   from pre_exit.  */
                if (last_insn != BB_HEAD (src_bb))
                  src_bb = split_block (src_bb,
                                        PREV_INSN (before_return_copy))->dest;
              }
            else
              before_return_copy = last_insn;
            pre_exit = split_block (src_bb, before_return_copy)->src;
          }
        else
          {
            pre_exit = split_edge (eg);
            COPY_REG_SET (pre_exit->il.rtl->global_live_at_start, live_at_end);
            COPY_REG_SET (pre_exit->il.rtl->global_live_at_end, live_at_end);
          }
      }

  return pre_exit;
}
#endif

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  rtx insn;
  int e;
  basic_block bb;
  int need_commit = 0;
  sbitmap *kill;
  struct edge_list *edge_list;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities;
  int max_num_modes = 0;
  bool emitted = false;
  basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED;

  clear_bb_flags ();

  for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If MODE_ENTRY and MODE_EXIT are defined, allow for three
           extra blocks split from the entry and exit blocks.  */
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
        entry_exit_extra = 3;
#endif
        bb_info[n_entities]
          = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  /* Split the edge from the entry block, so that we can note that
     NORMAL_MODE is supplied there.  */
  post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
  pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif

  /* Create the bitmap vectors.  */

  antic = sbitmap_vector_alloc (last_basic_block, n_entities);
  transp = sbitmap_vector_alloc (last_basic_block, n_entities);
  comp = sbitmap_vector_alloc (last_basic_block, n_entities);

  sbitmap_vector_ones (transp, last_basic_block);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      /* Determine the first mode (if any) needed for entity E in each
         basic block; this is the mode that is anticipatable for the block.
         Also compute the initial transparency settings.  */
      FOR_EACH_BB (bb)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          HARD_REG_SET live_now;

          REG_SET_TO_HARD_REG_SET (live_now,
                                   bb->il.rtl->global_live_at_start);

          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge e;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (e->flags & EDGE_COMPLEX)
                break;
            if (e)
              {
                ptr = new_seginfo (no_mode, BB_HEAD (bb), bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                RESET_BIT (transp[bb->index], j);
              }
          }

          for (insn = BB_HEAD (bb);
               insn != NULL && insn != NEXT_INSN (BB_END (bb));
               insn = NEXT_INSN (insn))
            {
              if (INSN_P (insn))
                {
                  int mode = MODE_NEEDED (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      RESET_BIT (transp[bb->index], j);
                    }
#ifdef MODE_AFTER
                  last_mode = MODE_AFTER (last_mode, insn);
#endif
                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), live_now);
                }
            }

          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.  */
          if (last_mode == no_mode)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
            }
        }
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
      {
        int mode = MODE_ENTRY (e);

        if (mode != no_mode)
          {
            bb = post_entry;

            /* By always making this nontransparent, we save
               an extra check in make_preds_opaque.  We also
               need this to avoid confusing pre_edge_lcm when
               antic is cleared but transp and comp are set.  */
            RESET_BIT (transp[bb->index], j);

            /* Insert a fake computing definition of MODE into entry
               blocks which compute no mode. This represents the mode on
               entry.  */
            info[bb->index].computing = mode;

            if (pre_exit)
              info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
          }
      }
#endif /* MODE_ENTRY && MODE_EXIT */
    }

  kill = sbitmap_vector_alloc (last_basic_block, n_entities);
  for (i = 0; i < max_num_modes; i++)
    {
      int current_mode[N_ENTITIES];
      sbitmap *delete;
      sbitmap *insert;

      /* Set the anticipatable and computing arrays.  */
      sbitmap_vector_zero (antic, last_basic_block);
      sbitmap_vector_zero (comp, last_basic_block);
      for (j = n_entities - 1; j >= 0; j--)
        {
          int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
          struct bb_info *info = bb_info[j];

          FOR_EACH_BB (bb)
            {
              if (info[bb->index].seginfo->mode == m)
                SET_BIT (antic[bb->index], j);

              if (info[bb->index].computing == m)
                SET_BIT (comp[bb->index], j);
            }
        }

      /* Calculate the optimal locations for placing
         mode switches to modes with priority I.  */

      FOR_EACH_BB (bb)
        sbitmap_not (kill[bb->index], transp[bb->index]);
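      /* Run the edge-based lazy code motion solver: for entity bit J,
         TRANSP marks blocks that leave the entity's mode alone, ANTIC
         marks blocks whose first requirement is mode M, COMP marks
         blocks whose last set is mode M, and KILL is the complement of
         TRANSP.  On return, INSERT says on which edges a mode set must
         be emitted and DELETE which blocks' mode set became redundant.  */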
      edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
                                kill, &insert, &delete);

      for (j = n_entities - 1; j >= 0; j--)
        {
          /* Insert all mode sets that have been inserted by lcm.  */
          int no_mode = num_modes[entity_map[j]];

          /* Wherever we have moved a mode setting upwards in the flow graph,
             the blocks between the new setting site and the now redundant
             computation cease to be transparent for any lower-priority
             mode of the same entity.  First set the aux field of each
             insertion site edge non-transparent, then propagate the new
             non-transparency from the redundant computation upwards till
             we hit an insertion site or an already non-transparent block.  */
          for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
            {
              edge eg = INDEX_EDGE (edge_list, e);
              int mode;
              basic_block src_bb;
              HARD_REG_SET live_at_edge;
              rtx mode_set;

              eg->aux = 0;

              if (! TEST_BIT (insert[e], j))
                continue;

              eg->aux = (void *)1;

              mode = current_mode[j];
              src_bb = eg->src;

              REG_SET_TO_HARD_REG_SET (live_at_edge,
                                       src_bb->il.rtl->global_live_at_end);

              start_sequence ();
              EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
              mode_set = get_insns ();
              end_sequence ();

              /* Do not bother to insert an empty sequence.  */
              if (mode_set == NULL_RTX)
                continue;

              /* We should not get an abnormal edge here.  */
              gcc_assert (! (eg->flags & EDGE_ABNORMAL));

              need_commit = 1;
              insert_insn_on_edge (mode_set, eg);
            }

          FOR_EACH_BB_REVERSE (bb)
            if (TEST_BIT (delete[bb->index], j))
              {
                make_preds_opaque (bb, j);
                /* Cancel the 'deleted' mode set.  */
                bb_info[j][bb->index].seginfo->mode = no_mode;
              }
        }

      sbitmap_vector_free (delete);
      sbitmap_vector_free (insert);
      clear_aux_for_edges ();
      free_edge_list (edge_list);
    }

  /* Now output the remaining mode sets in all the segments.  */
  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      FOR_EACH_BB_REVERSE (bb)
        {
          struct seginfo *ptr, *next;
          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx mode_set;

                  start_sequence ();
                  EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emitted = true;
                      if (NOTE_P (ptr->insn_ptr)
                          && (NOTE_LINE_NUMBER (ptr->insn_ptr)
                              == NOTE_INSN_BASIC_BLOCK))
                        emit_insn_after (mode_set, ptr->insn_ptr);
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }

  /* Finished. Free up all the things we've allocated.  */

  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  if (need_commit)
    commit_edge_insertions ();

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  cleanup_cfg (CLEANUP_NO_INSN_DEL);
#else
  if (!need_commit && !emitted)
    return 0;
#endif

  max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, FALSE);
  update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                    (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE
                                     | PROP_SCAN_DEAD_CODE));

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */

static bool
gate_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  return true;
#else
  return false;
#endif
}

static unsigned int
rest_of_handle_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  no_new_pseudos = 0;
  optimize_mode_switching ();
  no_new_pseudos = 1;
#endif /* OPTIMIZE_MODE_SWITCHING */
  return 0;
}


struct tree_opt_pass pass_mode_switching =
{
  "mode-sw",                            /* name */
  gate_mode_switching,                  /* gate */
  rest_of_handle_mode_switching,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_MODE_SWITCH,                       /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */

};
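
/* Editor's note (not part of the original file): pass_mode_switching is
   declared in tree-pass.h and scheduled from the RTL pass list in
   passes.c via NEXT_PASS (pass_mode_switching).  */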
