OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-dev/or1k-gcc/gcc/mode-switching.c (rev 774)
/* CPU mode switching
   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "target.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "tm_p.h"
#include "function.h"
#include "tree-pass.h"
#include "timevar.h"
#include "df.h"
#include "emit-rtl.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"
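/* The pass is driven entirely by target macros, all of which are used
   below: OPTIMIZE_MODE_SWITCHING, NUM_MODES_FOR_MODE_SWITCHING,
   MODE_NEEDED, MODE_PRIORITY_TO_MODE, EMIT_MODE_SET, and optionally
   MODE_AFTER plus the MODE_ENTRY/MODE_EXIT pair.  As a rough, purely
   illustrative sketch (not copied from any real port), a target with a
   single entity that has two modes, SINGLE (0) and DOUBLE (1), with 2
   meaning "no mode needed", might define something like:

       #define NUM_MODES_FOR_MODE_SWITCHING { 2 }
       #define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
       #define MODE_NEEDED(ENTITY, INSN) (get_attr_fp_mode (INSN))
       #define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)
       #define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
         emit_fpu_mode_set (MODE)

   where get_attr_fp_mode and emit_fpu_mode_set are hypothetical helpers
   provided by the port.  */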

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'); each element contains a
   list of all insns within that basic block, in the order they are
   encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry, without a mode.  (Each basic block
   in the flow graph must have at least one entry in the segment table.)

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until all modes of each entity have been processed.

   More details are located in the code for optimize_mode_switching().  */
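
/* A purely illustrative example (not tied to any particular target):
   suppose one entity models an FPU precision mode with the two modes
   SINGLE and DOUBLE.  A block whose first FP insn needs DOUBLE becomes
   anticipatable for DOUBLE; a block containing no FP insns at all keeps a
   single no-mode segment and stays transparent for both modes.  LCM then
   chooses edges on which to insert the DOUBLE mode set so that every path
   leading to a DOUBLE use passes through a set, and marks blocks whose own
   anticipated set thereby became redundant (see the 'del' bitmap in
   optimize_mode_switching).  Blocks between an insertion site and such a
   cancelled set lose their transparency before the lower-priority SINGLE
   mode is processed.  */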

/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int mode;
  rtx insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
};
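
/* One bb_info element exists per basic block and per entity (see the
   bb_info array in optimize_mode_switching, indexed by bb->index):
   SEGINFO is the list of segments built for the block, and COMPUTING
   records the last mode established within the block, i.e. the mode
   that is in effect when the block is left.  */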

/* These bitmaps are used for the LCM algorithm.  */

static sbitmap *antic;
static sbitmap *transp;
static sbitmap *comp;

static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);
static void make_preds_opaque (basic_block, int);


/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   basic block BB and REGS_LIVE parameters.  */

static struct seginfo *
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
        ptr = ptr->next;
      ptr->next = info;
    }
}

/* Make all predecessors of basic block B opaque, recursively, till we hit
   some that are already non-transparent, or an edge where aux is set; that
   denotes that a mode set is to be done on that edge.
   J is the bit number in the bitmaps that corresponds to the entity that
   we are currently handling mode-switching for.  */

static void
make_preds_opaque (basic_block b, int j)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->preds)
    {
      basic_block pb = e->src;

      if (e->aux || ! TEST_BIT (transp[pb->index], j))
        continue;

      RESET_BIT (transp[pb->index], j);
      make_preds_opaque (pb, j);
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined as well,
   and vice versa.  */
#if defined (MODE_ENTRY) != defined (MODE_EXIT)
 #error "Both MODE_ENTRY and MODE_EXIT must be defined"
#endif

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */
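/* For illustration only (register numbers are made up): when the function
   returns a value, the fallthrough predecessor of the exit block typically
   ends in a copy of the return value into its hard register followed by a
   USE of that register, e.g.

       (set (reg:SF 0) (reg:SF <pseudo>))
       (use (reg:SF 0))

   The final mode switch has to go before that copy, which is why the loop
   below walks backwards from the USE looking for the copy (or copies, for
   multi-register return values).  */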

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
        basic_block src_bb = eg->src;
        rtx last_insn, ret_reg;

        gcc_assert (!pre_exit);
        /* If this function returns a value at the end, we have to
           insert the final mode switch before the return value copy
           to its hard register.  */
        if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
            && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
            && GET_CODE (PATTERN (last_insn)) == USE
            && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
          {
            int ret_start = REGNO (ret_reg);
            int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
            int ret_end = ret_start + nregs;
            int short_block = 0;
            int maybe_builtin_apply = 0;
            int forced_late_switch = 0;
            rtx before_return_copy;

            do
              {
                rtx return_copy = PREV_INSN (last_insn);
                rtx return_copy_pat, copy_reg;
                int copy_start, copy_num;
                int j;

                if (INSN_P (return_copy))
                  {
                    /* When using SJLJ exceptions, the call to the
                       unregister function is inserted between the
                       clobber of the return value and the copy.
                       We do not want to split the block before this
                       or any other call; if we have not found the
                       copy yet, the copy must have been deleted.  */
                    if (CALL_P (return_copy))
                      {
                        short_block = 1;
                        break;
                      }
                    return_copy_pat = PATTERN (return_copy);
                    switch (GET_CODE (return_copy_pat))
                      {
                      case USE:
                        /* Skip __builtin_apply pattern.  */
                        if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
                            && (targetm.calls.function_value_regno_p
                                (REGNO (XEXP (return_copy_pat, 0)))))
                          {
                            maybe_builtin_apply = 1;
                            last_insn = return_copy;
                            continue;
                          }
                        break;

                      case ASM_OPERANDS:
                        /* Skip barrier insns.  */
                        if (!MEM_VOLATILE_P (return_copy_pat))
                          break;

                        /* Fall through.  */

                      case ASM_INPUT:
                      case UNSPEC_VOLATILE:
                        last_insn = return_copy;
                        continue;

                      default:
                        break;
                      }

                    /* If the return register is not (in its entirety)
                       likely spilled, the return copy might be
                       partially or completely optimized away.  */
                    return_copy_pat = single_set (return_copy);
                    if (!return_copy_pat)
                      {
                        return_copy_pat = PATTERN (return_copy);
                        if (GET_CODE (return_copy_pat) != CLOBBER)
                          break;
                        else if (!optimize)
                          {
                            /* This might be (clobber (reg [<result>]))
                               when not optimizing.  Then check if
                               the previous insn is the clobber for
                               the return register.  */
                            copy_reg = SET_DEST (return_copy_pat);
                            if (GET_CODE (copy_reg) == REG
                                && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
                              {
                                if (INSN_P (PREV_INSN (return_copy)))
                                  {
                                    return_copy = PREV_INSN (return_copy);
                                    return_copy_pat = PATTERN (return_copy);
                                    if (GET_CODE (return_copy_pat) != CLOBBER)
                                      break;
                                  }
                              }
                          }
                      }
                    copy_reg = SET_DEST (return_copy_pat);
                    if (GET_CODE (copy_reg) == REG)
                      copy_start = REGNO (copy_reg);
                    else if (GET_CODE (copy_reg) == SUBREG
                             && GET_CODE (SUBREG_REG (copy_reg)) == REG)
                      copy_start = REGNO (SUBREG_REG (copy_reg));
                    else
                      break;
                    if (copy_start >= FIRST_PSEUDO_REGISTER)
                      break;
                    copy_num
                      = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

                    /* If the return register is not likely spilled, as is
                       the case for floating point on SH4, then it might
                       be set by an arithmetic operation that needs a
                       different mode than the exit block.  */
                    for (j = n_entities - 1; j >= 0; j--)
                      {
                        int e = entity_map[j];
                        int mode = MODE_NEEDED (e, return_copy);

                        if (mode != num_modes[e] && mode != MODE_EXIT (e))
                          break;
                      }
                    if (j >= 0)
                      {
                        /* For the SH4, floating point loads depend on fpscr,
                           thus we might need to put the final mode switch
                           after the return value copy.  That is still OK,
                           because a floating point return value does not
                           conflict with address reloads.  */
                        if (copy_start >= ret_start
                            && copy_start + copy_num <= ret_end
                            && OBJECT_P (SET_SRC (return_copy_pat)))
                          forced_late_switch = 1;
                        break;
                      }

                    if (copy_start >= ret_start
                        && copy_start + copy_num <= ret_end)
                      nregs -= copy_num;
                    else if (!maybe_builtin_apply
                             || !targetm.calls.function_value_regno_p
                                 (copy_start))
                      break;
                    last_insn = return_copy;
                  }
                /* ??? Exception handling can lead to the return value
                   copy being already separated from the return value use,
                   as in unwind-dw2.c.
                   Similarly, conditionally returning without a value,
                   and conditionally using builtin_return can lead to an
                   isolated use.  */
                if (return_copy == BB_HEAD (src_bb))
                  {
                    short_block = 1;
                    break;
                  }
                last_insn = return_copy;
              }
            while (nregs);

            /* If we didn't see a full return value copy, verify that there
               is a plausible reason for this.  If some, but not all of the
               return register is likely spilled, we can expect that there
               is a copy for the likely spilled part.  */
            gcc_assert (!nregs
                        || forced_late_switch
                        || short_block
                        || !(targetm.class_likely_spilled_p
                             (REGNO_REG_CLASS (ret_start)))
                        || (nregs
                            != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
                        /* For multi-hard-register floating point
                           values, sometimes the likely-spilled part
                           is ordinarily copied first, then the other
                           part is set with an arithmetic operation.
                           This doesn't actually cause reload
                           failures, so let it pass.  */
                        || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
                            && nregs != 1));

            if (INSN_P (last_insn))
              {
                before_return_copy
                  = emit_note_before (NOTE_INSN_DELETED, last_insn);
                /* Instructions preceding LAST_INSN in the same block might
                   require a different mode than MODE_EXIT, so if we might
                   have such instructions, keep them in a separate block
                   from pre_exit.  */
                if (last_insn != BB_HEAD (src_bb))
                  src_bb = split_block (src_bb,
                                        PREV_INSN (before_return_copy))->dest;
              }
            else
              before_return_copy = last_insn;
            pre_exit = split_block (src_bb, before_return_copy)->src;
          }
        else
          {
            pre_exit = split_edge (eg);
          }
      }

  return pre_exit;
}
#endif

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  rtx insn;
  int e;
  basic_block bb;
  int need_commit = 0;
  sbitmap *kill;
  struct edge_list *edge_list;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities;
  int max_num_modes = 0;
  bool emited ATTRIBUTE_UNUSED = false;
  basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED;

  for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If MODE_ENTRY and MODE_EXIT are defined, allow for up to three
           extra blocks split from the entry and exit blocks.  */
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
        entry_exit_extra = 3;
#endif
        bb_info[n_entities]
          = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  /* Split the edge from the entry block, so that we can note that
     NORMAL_MODE is supplied there.  */
  post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
  pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif

  df_analyze ();

  /* Create the bitmap vectors.  */

  antic = sbitmap_vector_alloc (last_basic_block, n_entities);
  transp = sbitmap_vector_alloc (last_basic_block, n_entities);
  comp = sbitmap_vector_alloc (last_basic_block, n_entities);

  sbitmap_vector_ones (transp, last_basic_block);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      /* Determine what mode of entity E the first use (if any) needs.
         This will be the mode that is anticipatable for this block.
         Also compute the initial transparency settings.  */
      FOR_EACH_BB (bb)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          bool any_set_required = false;
          HARD_REG_SET live_now;

          REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge e;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (e->flags & EDGE_COMPLEX)
                break;
            if (e)
              {
                ptr = new_seginfo (no_mode, BB_HEAD (bb), bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                RESET_BIT (transp[bb->index], j);
              }
          }

          FOR_BB_INSNS (bb, insn)
            {
              if (INSN_P (insn))
                {
                  int mode = MODE_NEEDED (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      any_set_required = true;
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      RESET_BIT (transp[bb->index], j);
                    }
#ifdef MODE_AFTER
                  last_mode = MODE_AFTER (last_mode, insn);
#endif
                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), &live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), &live_now);
                }
            }

          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.
             N.B. because of MODE_AFTER, last_mode might still be different
             from no_mode.  */
          if (!any_set_required)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
            }
        }
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
      {
        int mode = MODE_ENTRY (e);

        if (mode != no_mode)
          {
            bb = post_entry;

            /* By always making this nontransparent, we save
               an extra check in make_preds_opaque.  We also
               need this to avoid confusing pre_edge_lcm when
               antic is cleared but transp and comp are set.  */
            RESET_BIT (transp[bb->index], j);

            /* Insert a fake computing definition of MODE into entry
               blocks which compute no mode. This represents the mode on
               entry.  */
            info[bb->index].computing = mode;

            if (pre_exit)
              info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
          }
      }
#endif /* MODE_ENTRY && MODE_EXIT */
    }

  kill = sbitmap_vector_alloc (last_basic_block, n_entities);
  for (i = 0; i < max_num_modes; i++)
    {
      int current_mode[N_ENTITIES];
      sbitmap *del;
      sbitmap *insert;

      /* Set the anticipatable and computing arrays.  */
      sbitmap_vector_zero (antic, last_basic_block);
      sbitmap_vector_zero (comp, last_basic_block);
      for (j = n_entities - 1; j >= 0; j--)
        {
          int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
          struct bb_info *info = bb_info[j];

          FOR_EACH_BB (bb)
            {
              if (info[bb->index].seginfo->mode == m)
                SET_BIT (antic[bb->index], j);

              if (info[bb->index].computing == m)
                SET_BIT (comp[bb->index], j);
            }
        }

      /* Calculate the optimal locations for placing
         mode switches to modes with priority I.  */
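      /* KILL is simply the complement of TRANSP: a block kills a mode of an
         entity wherever it is not transparent for it.  pre_edge_lcm returns
         two sbitmap vectors: INSERT, indexed by edge number, tells on which
         edges a set of the current mode has to be inserted; DEL, indexed by
         basic block, tells in which blocks the anticipated mode is already
         established on entry, so the corresponding segment entry can be
         cancelled below.  */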

      FOR_EACH_BB (bb)
        sbitmap_not (kill[bb->index], transp[bb->index]);
      edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
                                kill, &insert, &del);

      for (j = n_entities - 1; j >= 0; j--)
        {
          /* Insert all mode sets whose placement was determined by LCM.  */
          int no_mode = num_modes[entity_map[j]];

          /* Wherever we have moved a mode setting upwards in the flow graph,
             the blocks between the new setting site and the now redundant
             computation cease to be transparent for any lower-priority
             mode of the same entity.  First mark each insertion-site edge
             by setting its aux field, then propagate the new
             non-transparency from the redundant computation upwards until
             we hit an insertion site or an already non-transparent block.  */
          for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
            {
              edge eg = INDEX_EDGE (edge_list, e);
              int mode;
              basic_block src_bb;
              HARD_REG_SET live_at_edge;
              rtx mode_set;

              eg->aux = 0;

              if (! TEST_BIT (insert[e], j))
                continue;

              eg->aux = (void *)1;

              mode = current_mode[j];
              src_bb = eg->src;

              REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

              start_sequence ();
              EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
              mode_set = get_insns ();
              end_sequence ();

              /* Do not bother to insert an empty sequence.  */
              if (mode_set == NULL_RTX)
                continue;

              /* We should not get an abnormal edge here.  */
              gcc_assert (! (eg->flags & EDGE_ABNORMAL));

              need_commit = 1;
              insert_insn_on_edge (mode_set, eg);
            }

          FOR_EACH_BB_REVERSE (bb)
            if (TEST_BIT (del[bb->index], j))
              {
                make_preds_opaque (bb, j);
                /* Cancel the 'deleted' mode set.  */
                bb_info[j][bb->index].seginfo->mode = no_mode;
              }
        }

      sbitmap_vector_free (del);
      sbitmap_vector_free (insert);
      clear_aux_for_edges ();
      free_edge_list (edge_list);
    }

  /* Now output the remaining mode sets in all the segments.  */
  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      FOR_EACH_BB_REVERSE (bb)
        {
          struct seginfo *ptr, *next;
          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx mode_set;

                  start_sequence ();
                  EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emited = true;
                      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
                        emit_insn_after (mode_set, ptr->insn_ptr);
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  if (need_commit)
    commit_edge_insertions ();

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  cleanup_cfg (CLEANUP_NO_INSN_DEL);
#else
  if (!need_commit && !emited)
    return 0;
#endif

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */

static bool
gate_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  return true;
#else
  return false;
#endif
}

static unsigned int
rest_of_handle_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
  return 0;
}


struct rtl_opt_pass pass_mode_switching =
{
 {
  RTL_PASS,
  "mode_sw",                            /* name */
  gate_mode_switching,                  /* gate */
  rest_of_handle_mode_switching,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_MODE_SWITCH,                       /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func                        /* todo_flags_finish */
 }
};
