openrisc/trunk/gnu-src/gcc-4.5.1/gcc/ira-emit.c (rev 320)

/* Integrated Register Allocator.  Changing code and generating moves.
2
   Copyright (C) 2006, 2007, 2008, 2009
3
   Free Software Foundation, Inc.
4
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5
 
6
This file is part of GCC.
7
 
8
GCC is free software; you can redistribute it and/or modify it under
9
the terms of the GNU General Public License as published by the Free
10
Software Foundation; either version 3, or (at your option) any later
11
version.
12
 
13
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14
WARRANTY; without even the implied warranty of MERCHANTABILITY or
15
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16
for more details.
17
 
18
You should have received a copy of the GNU General Public License
19
along with GCC; see the file COPYING3.  If not see
20
<http://www.gnu.org/licenses/>.  */
21
 
22
 
23
#include "config.h"
24
#include "system.h"
25
#include "coretypes.h"
26
#include "tm.h"
27
#include "regs.h"
28
#include "rtl.h"
29
#include "tm_p.h"
30
#include "target.h"
31
#include "flags.h"
32
#include "obstack.h"
33
#include "bitmap.h"
34
#include "hard-reg-set.h"
35
#include "basic-block.h"
36
#include "expr.h"
37
#include "recog.h"
38
#include "params.h"
39
#include "timevar.h"
40
#include "tree-pass.h"
41
#include "output.h"
42
#include "reload.h"
43
#include "errors.h"
44
#include "df.h"
45
#include "ira-int.h"
46
 
47
 
48
typedef struct move *move_t;
49
 
50
/* The structure represents an allocno move.  Both allocnos have the
51
   same original regno but different allocation.  */
52
struct move
53
{
54
  /* The allocnos involved in the move.  */
55
  ira_allocno_t from, to;
56
  /* The next move in the move sequence.  */
57
  move_t next;
58
  /* Used for finding dependencies.  */
59
  bool visited_p;
60
  /* The size of the following array. */
61
  int deps_num;
62
  /* Moves on which the given move depends.  Dependency can be cyclic.
63
     It means we need a temporary to generate the moves.  Sequence
64
     A1->A2, B1->B2 where A1 and B2 are assigned to reg R1 and A2 and
65
     B1 are assigned to reg R2 is an example of the cyclic
66
     dependencies.  */
67
  move_t *deps;
68
  /* First insn generated for the move.  */
69
  rtx insn;
70
};
71
 
72
/* Array of moves (indexed by BB index) which should be put at the
73
   start/end of the corresponding basic blocks.  */
74
static move_t *at_bb_start, *at_bb_end;
75
 
76
/* Max regno before renaming some pseudo-registers.  For example, the
77
   same pseudo-register can be renamed in a loop if its allocation is
78
   different outside the loop.  */
79
static int max_regno_before_changing;
80
 
81
/* Return new move of allocnos TO and FROM.  */
82
static move_t
83
create_move (ira_allocno_t to, ira_allocno_t from)
84
{
85
  move_t move;
86
 
87
  move = (move_t) ira_allocate (sizeof (struct move));
88
  move->deps = NULL;
89
  move->deps_num = 0;
90
  move->to = to;
91
  move->from = from;
92
  move->next = NULL;
93
  move->insn = NULL_RTX;
94
  move->visited_p = false;
95
  return move;
96
}
97
 
98
/* Free memory for MOVE and its dependencies.  */
99
static void
100
free_move (move_t move)
101
{
102
  if (move->deps != NULL)
103
    ira_free (move->deps);
104
  ira_free (move);
105
}
106
 
107
/* Free memory for list of the moves given by its HEAD.  */
108
static void
109
free_move_list (move_t head)
110
{
111
  move_t next;
112
 
113
  for (; head != NULL; head = next)
114
    {
115
      next = head->next;
116
      free_move (head);
117
    }
118
}
119
 
120
/* Return TRUE if the move lists LIST1 and LIST2 are equal (two
121
   moves are equal if they involve the same allocnos).  */
122
static bool
123
eq_move_lists_p (move_t list1, move_t list2)
124
{
125
  for (; list1 != NULL && list2 != NULL;
126
       list1 = list1->next, list2 = list2->next)
127
    if (list1->from != list2->from || list1->to != list2->to)
128
      return false;
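  /* Both lists must have ended together; otherwise one list is a
     proper prefix of the other and the lists are not equal.  */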
129
  return list1 == list2;
130
}
131
 
132
/* Print move list LIST into file F.  */
133
static void
134
print_move_list (FILE *f, move_t list)
135
{
136
  for (; list != NULL; list = list->next)
137
    fprintf (f, " a%dr%d->a%dr%d",
138
             ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
139
             ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
140
  fprintf (f, "\n");
141
}
142
 
143
extern void ira_debug_move_list (move_t list);
144
 
145
/* Print move list LIST into stderr.  */
146
void
147
ira_debug_move_list (move_t list)
148
{
149
  print_move_list (stderr, list);
150
}
151
 
152
/* This recursive function changes pseudo-registers in *LOC if it is
153
   necessary.  The function returns TRUE if a change was done.  */
154
static bool
155
change_regs (rtx *loc)
156
{
157
  int i, regno, result = false;
158
  const char *fmt;
159
  enum rtx_code code;
160
  rtx reg;
161
 
162
  if (*loc == NULL_RTX)
163
    return false;
164
  code = GET_CODE (*loc);
165
  if (code == REG)
166
    {
167
      regno = REGNO (*loc);
168
      if (regno < FIRST_PSEUDO_REGISTER)
169
        return false;
170
      if (regno >= max_regno_before_changing)
171
        /* It is a shared register which was changed already.  */
172
        return false;
173
      if (ira_curr_regno_allocno_map[regno] == NULL)
174
        return false;
175
      reg = ALLOCNO_REG (ira_curr_regno_allocno_map[regno]);
176
      if (reg == *loc)
177
        return false;
178
      *loc = reg;
179
      return true;
180
    }
181
 
182
  fmt = GET_RTX_FORMAT (code);
183
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
184
    {
185
      if (fmt[i] == 'e')
186
        result = change_regs (&XEXP (*loc, i)) || result;
187
      else if (fmt[i] == 'E')
188
        {
189
          int j;
190
 
191
          for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
192
            result = change_regs (&XVECEXP (*loc, i, j)) || result;
193
        }
194
    }
195
  return result;
196
}
197
 
198
/* Attach MOVE to the edge E.  The move is attached to the head of the
199
   list if HEAD_P is TRUE.  */
200
static void
201
add_to_edge_list (edge e, move_t move, bool head_p)
202
{
203
  move_t last;
204
 
205
  if (head_p || e->aux == NULL)
206
    {
207
      move->next = (move_t) e->aux;
208
      e->aux = move;
209
    }
210
  else
211
    {
212
      for (last = (move_t) e->aux; last->next != NULL; last = last->next)
213
        ;
214
      last->next = move;
215
      move->next = NULL;
216
    }
217
}
218
 
219
/* Create and return new pseudo-register with the same attributes as
220
   ORIGINAL_REG.  */
221
static rtx
222
create_new_reg (rtx original_reg)
223
{
224
  rtx new_reg;
225
 
226
  new_reg = gen_reg_rtx (GET_MODE (original_reg));
227
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
228
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
229
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
230
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
231
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
232
    fprintf (ira_dump_file, "      Creating newreg=%i from oldreg=%i\n",
233
             REGNO (new_reg), REGNO (original_reg));
234
  return new_reg;
235
}
236
 
237
/* Return TRUE if the loop given by SUBNODE is inside the loop given by
238
   NODE.  */
239
static bool
240
subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
241
{
242
  for (; subnode != NULL; subnode = subnode->parent)
243
    if (subnode == node)
244
      return true;
245
  return false;
246
}
247
 
248
/* Set up member `reg' to REG for allocnos which have the same regno as
249
   ALLOCNO and which are inside the loop corresponding to ALLOCNO. */
250
static void
251
set_allocno_reg (ira_allocno_t allocno, rtx reg)
252
{
253
  int regno;
254
  ira_allocno_t a;
255
  ira_loop_tree_node_t node;
256
 
257
  node = ALLOCNO_LOOP_TREE_NODE (allocno);
258
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
259
       a != NULL;
260
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
261
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
262
      ALLOCNO_REG (a) = reg;
263
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
264
    ALLOCNO_REG (a) = reg;
265
  regno = ALLOCNO_REGNO (allocno);
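  /* Walk up through caps and parent loop tree nodes, marking
     ALLOCNO_CHILD_RENAMED_P until an already marked ancestor is
     found.  */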
266
  for (a = allocno;;)
267
    {
268
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
269
        {
270
          node = node->parent;
271
          if (node == NULL)
272
            break;
273
          a = node->regno_allocno_map[regno];
274
        }
275
      if (a == NULL)
276
        continue;
277
      if (ALLOCNO_CHILD_RENAMED_P (a))
278
        break;
279
      ALLOCNO_CHILD_RENAMED_P (a) = true;
280
    }
281
}
282
 
283
/* Return true if there is an entry to given loop not from its parent
284
   (or grandparent) block.  For example, this can happen for two
285
   adjacent loops inside another loop.  */
286
static bool
287
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
288
{
289
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
290
  edge e;
291
  edge_iterator ei;
292
 
293
  for (bb_node = loop_node->children; bb_node != NULL; bb_node = bb_node->next)
294
    if (bb_node->bb != NULL)
295
      {
296
        FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
297
          if (e->src != ENTRY_BLOCK_PTR
298
              && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
299
            {
300
              for (parent = src_loop_node->parent;
301
                   parent != NULL;
302
                   parent = parent->parent)
303
                if (parent == loop_node)
304
                  break;
305
              if (parent != NULL)
306
                /* That is an exit from a nested loop -- skip it.  */
307
                continue;
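              /* Check whether the source block belongs to an ancestor
                 of LOOP_NODE; if it does not, the edge enters the
                 loop from a non-parent block.  */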
308
              for (parent = loop_node->parent;
309
                   parent != NULL;
310
                   parent = parent->parent)
311
                if (src_loop_node == parent)
312
                  break;
313
              if (parent == NULL)
314
                return true;
315
            }
316
      }
317
  return false;
318
}
319
 
320
/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
321
static void
322
setup_entered_from_non_parent_p (void)
323
{
324
  unsigned int i;
325
  loop_p loop;
326
 
327
  for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
328
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
329
      ira_loop_nodes[i].entered_from_non_parent_p
330
        = entered_from_non_parent_p (&ira_loop_nodes[i]);
331
}
332
 
333
/* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to
334
   DEST_ALLOCNO (assigned to memory) can be removed because it does
335
   not change value of the destination.  One possible reason for this
336
   is the situation when SRC_ALLOCNO is not modified in the
337
   corresponding loop.  */
338
static bool
339
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
340
{
341
  int regno, orig_regno;
342
  ira_allocno_t a;
343
  ira_loop_tree_node_t node;
344
 
345
  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
346
              && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
347
  orig_regno = ALLOCNO_REGNO (src_allocno);
348
  regno = REGNO (ALLOCNO_REG (dest_allocno));
349
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
350
       node != NULL;
351
       node = node->parent)
352
    {
353
      a = node->regno_allocno_map[orig_regno];
354
      ira_assert (a != NULL);
355
      if (REGNO (ALLOCNO_REG (a)) == (unsigned) regno)
356
        /* We reached the destination and everything is ok.  */
357
        return true;
358
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
359
        return false;
360
      else if (node->entered_from_non_parent_p)
361
        /* If there is a path from a destination loop block to the
362
           source loop header containing basic blocks of non-parents
363
           (grandparents) of the source loop, we should have checked
364
           modifications of the pseudo on this path too to decide
365
           whether the store can be removed.  It could be done by
366
           solving a data-flow problem.  Unfortunately such global
367
           solution would complicate IR flattening.  Therefore we just
368
           prohibit removal of the store in such complicated case.  */
369
        return false;
370
    }
371
  gcc_unreachable ();
372
}
373
 
374
/* Generate and attach moves to the edge E.  This looks at the final
375
   regnos of allocnos living on the edge with the same original regno
376
   to figure out when moves should be generated.  */
377
static void
378
generate_edge_moves (edge e)
379
{
380
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
381
  unsigned int regno;
382
  bitmap_iterator bi;
383
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
384
  move_t move;
385
 
386
  src_loop_node = IRA_BB_NODE (e->src)->parent;
387
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
388
  e->aux = NULL;
389
  if (src_loop_node == dest_loop_node)
390
    return;
391
  src_map = src_loop_node->regno_allocno_map;
392
  dest_map = dest_loop_node->regno_allocno_map;
393
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (e->dest),
394
                             FIRST_PSEUDO_REGISTER, regno, bi)
395
    if (bitmap_bit_p (DF_LR_OUT (e->src), regno))
396
      {
397
        src_allocno = src_map[regno];
398
        dest_allocno = dest_map[regno];
399
        if (REGNO (ALLOCNO_REG (src_allocno))
400
            == REGNO (ALLOCNO_REG (dest_allocno)))
401
          continue;
402
        /* Remove unnecessary stores at the region exit.  We should do
403
           this for readonly memory for sure and this is guaranteed by
404
           the fact that we never generate moves on region borders (see
405
           checking ira_reg_equiv_invariant_p in function
406
           change_loop).  */
407
        if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
408
            && ALLOCNO_HARD_REGNO (src_allocno) >= 0
409
            && store_can_be_removed_p (src_allocno, dest_allocno))
410
          {
411
            ALLOCNO_MEM_OPTIMIZED_DEST (src_allocno) = dest_allocno;
412
            ALLOCNO_MEM_OPTIMIZED_DEST_P (dest_allocno) = true;
413
            if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
414
              fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
415
                       regno, ALLOCNO_NUM (src_allocno),
416
                       ALLOCNO_NUM (dest_allocno));
417
            continue;
418
          }
419
        move = create_move (dest_allocno, src_allocno);
420
        add_to_edge_list (e, move, true);
421
    }
422
}
423
 
424
/* Bitmap of allocnos local for the current loop.  */
425
static bitmap local_allocno_bitmap;
426
 
427
/* This bitmap is used to detect that we need to generate and use a
428
   new pseudo-register when processing allocnos with the same original
429
   regno.  */
430
static bitmap used_regno_bitmap;
431
 
432
/* This bitmap contains regnos of allocnos which were renamed locally
433
   because the allocnos correspond to disjoint live ranges in loops
434
   with a common parent.  */
435
static bitmap renamed_regno_bitmap;
436
 
437
/* Change (if necessary) pseudo-registers inside loop given by loop
438
   tree node NODE.  */
439
static void
440
change_loop (ira_loop_tree_node_t node)
441
{
442
  bitmap_iterator bi;
443
  unsigned int i;
444
  int regno;
445
  bool used_p;
446
  ira_allocno_t allocno, parent_allocno, *map;
447
  rtx insn, original_reg;
448
  enum reg_class cover_class;
449
  ira_loop_tree_node_t parent;
450
 
451
  if (node != ira_loop_tree_root)
452
    {
453
 
454
      if (node->bb != NULL)
455
        {
456
          FOR_BB_INSNS (node->bb, insn)
457
            if (INSN_P (insn) && change_regs (&insn))
458
              {
459
                df_insn_rescan (insn);
460
                df_notes_rescan (insn);
461
              }
462
          return;
463
        }
464
 
465
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
466
        fprintf (ira_dump_file,
467
                 "      Changing RTL for loop %d (header bb%d)\n",
468
                 node->loop->num, node->loop->header->index);
469
 
470
      parent = ira_curr_loop_tree_node->parent;
471
      map = parent->regno_allocno_map;
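      /* For each allocno living on the region border, decide whether
         it needs a new pseudo-register because its allocation differs
         from the allocation of the corresponding allocno in the
         parent region.  */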
472
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
473
                                 0, i, bi)
474
        {
475
          allocno = ira_allocnos[i];
476
          regno = ALLOCNO_REGNO (allocno);
477
          cover_class = ALLOCNO_COVER_CLASS (allocno);
478
          parent_allocno = map[regno];
479
          ira_assert (regno < ira_reg_equiv_len);
480
          /* We generate the same hard register move because the
             reload pass can put an allocno into memory, in which case
             we will have live range splitting.  If that does not
             happen, such identical hard register moves will be
             removed.  The worst case, when both allocnos are put into
             memory by the reload, is very rare.  */
486
          if (parent_allocno != NULL
487
              && (ALLOCNO_HARD_REGNO (allocno)
488
                  == ALLOCNO_HARD_REGNO (parent_allocno))
489
              && (ALLOCNO_HARD_REGNO (allocno) < 0
490
                  || (parent->reg_pressure[cover_class] + 1
491
                      <= ira_available_class_regs[cover_class])
492
                  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
493
                                        [ALLOCNO_MODE (allocno)],
494
                                        ALLOCNO_HARD_REGNO (allocno))
495
                  /* Don't create copies because reload can spill an
                     allocno set by copy although the allocno will not
                     get a memory slot.  */
498
                  || ira_reg_equiv_invariant_p[regno]
499
                  || ira_reg_equiv_const[regno] != NULL_RTX))
500
            continue;
501
          original_reg = ALLOCNO_REG (allocno);
502
          if (parent_allocno == NULL
503
              || REGNO (ALLOCNO_REG (parent_allocno)) == REGNO (original_reg))
504
            {
505
              if (internal_flag_ira_verbose > 3 && ira_dump_file)
506
                fprintf (ira_dump_file, "  %i vs parent %i:",
507
                         ALLOCNO_HARD_REGNO (allocno),
508
                         ALLOCNO_HARD_REGNO (parent_allocno));
509
              set_allocno_reg (allocno, create_new_reg (original_reg));
510
            }
511
        }
512
    }
513
  /* Rename locals: Local allocnos with the same regno in different
     loops might get different hard registers.  So we need to change
515
     ALLOCNO_REG.  */
516
  bitmap_and_compl (local_allocno_bitmap,
517
                    ira_curr_loop_tree_node->all_allocnos,
518
                    ira_curr_loop_tree_node->border_allocnos);
519
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
520
    {
521
      allocno = ira_allocnos[i];
522
      regno = ALLOCNO_REGNO (allocno);
523
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
524
        continue;
525
      used_p = bitmap_bit_p (used_regno_bitmap, regno);
526
      bitmap_set_bit (used_regno_bitmap, regno);
527
      ALLOCNO_SOMEWHERE_RENAMED_P (allocno) = true;
528
      if (! used_p)
529
        continue;
530
      bitmap_set_bit (renamed_regno_bitmap, regno);
531
      set_allocno_reg (allocno, create_new_reg (ALLOCNO_REG (allocno)));
532
    }
533
}
534
 
535
/* A pass to set up the flag somewhere_renamed_p.  */
536
static void
537
set_allocno_somewhere_renamed_p (void)
538
{
539
  unsigned int regno;
540
  ira_allocno_t allocno;
541
  ira_allocno_iterator ai;
542
 
543
  FOR_EACH_ALLOCNO (allocno, ai)
544
    {
545
      regno = ALLOCNO_REGNO (allocno);
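      /* If the regno was renamed in some region but this allocno
         still uses the original pseudo-register, mark it too.  */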
546
      if (bitmap_bit_p (renamed_regno_bitmap, regno)
547
          && REGNO (ALLOCNO_REG (allocno)) == regno)
548
        ALLOCNO_SOMEWHERE_RENAMED_P (allocno) = true;
549
    }
550
}
551
 
552
/* Return TRUE if move lists on all edges given in vector VEC are
553
   equal.  */
554
static bool
555
eq_edge_move_lists_p (VEC(edge,gc) *vec)
556
{
557
  move_t list;
558
  int i;
559
 
560
  list = (move_t) EDGE_I (vec, 0)->aux;
561
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
562
    if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
563
      return false;
564
  return true;
565
}
566
 
567
/* Look at all entry edges (if START_P) or exit edges of basic block
568
   BB and put move lists at the BB start or end if it is possible.  In
569
   other words, this decreases code duplication of allocno moves.  */
570
static void
571
unify_moves (basic_block bb, bool start_p)
572
{
573
  int i;
574
  edge e;
575
  move_t list;
576
  VEC(edge,gc) *vec;
577
 
578
  vec = (start_p ? bb->preds : bb->succs);
579
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
580
    return;
581
  e = EDGE_I (vec, 0);
582
  list = (move_t) e->aux;
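  /* Moves put at the end of BB are emitted after its last insn (see
     emit_moves), which is not possible when that insn is a control
     flow insn.  */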
583
  if (! start_p && control_flow_insn_p (BB_END (bb)))
584
    return;
585
  e->aux = NULL;
586
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
587
    {
588
      e = EDGE_I (vec, i);
589
      free_move_list ((move_t) e->aux);
590
      e->aux = NULL;
591
    }
592
  if (start_p)
593
    at_bb_start[bb->index] = list;
594
  else
595
    at_bb_end[bb->index] = list;
596
}
597
 
598
/* Last move (in move sequence being processed) setting up the
599
   corresponding hard register.  */
600
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];
601
 
602
/* If the element value is equal to CURR_TICK then the corresponding
603
   element in `hard_regno_last_set' is defined and correct.  */
604
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];
605
 
606
/* Last move (in move sequence being processed) setting up the
607
   corresponding allocno.  */
608
static move_t *allocno_last_set;
609
 
610
/* If the element value is equal to CURR_TICK then the corresponding
611
   element in `allocno_last_set' is defined and correct.  */
612
static int *allocno_last_set_check;
613
 
614
/* Definition of vector of moves.  */
615
DEF_VEC_P(move_t);
616
DEF_VEC_ALLOC_P(move_t, heap);
617
 
618
/* This vec contains moves sorted topologically (depth-first) on their
619
   dependency graph.  */
620
static VEC(move_t,heap) *move_vec;
621
 
622
/* The variable value is used to check correctness of values of
623
   elements of arrays `hard_regno_last_set' and
624
   `allocno_last_set'.  */
625
static int curr_tick;
626
 
627
/* This recursive function traverses dependencies of MOVE and produces
628
   topological sorting (in depth-first order).  */
629
static void
630
traverse_moves (move_t move)
631
{
632
  int i;
633
 
634
  if (move->visited_p)
635
    return;
636
  move->visited_p = true;
637
  for (i = move->deps_num - 1; i >= 0; i--)
638
    traverse_moves (move->deps[i]);
639
  VEC_safe_push (move_t, heap, move_vec, move);
640
}
641
 
642
/* Remove unnecessary moves in LIST, do a topological sort, and remove
   cycles on hard reg dependencies by introducing new allocnos
   assigned to memory and additional moves.  Return the resulting
   move list.  */
646
static move_t
647
modify_move_list (move_t list)
648
{
649
  int i, n, nregs, hard_regno;
650
  ira_allocno_t to, from, new_allocno;
651
  move_t move, new_move, set_move, first, last;
652
 
653
  if (list == NULL)
654
    return NULL;
655
  /* Create move deps.  */
656
  curr_tick++;
657
  for (move = list; move != NULL; move = move->next)
658
    {
659
      to = move->to;
660
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
661
        continue;
662
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
663
      for (i = 0; i < nregs; i++)
664
        {
665
          hard_regno_last_set[hard_regno + i] = move;
666
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
667
        }
668
    }
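  /* Make each move depend on the moves whose destinations occupy the
     hard registers of its source (first count the dependencies, then
     fill the array).  */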
669
  for (move = list; move != NULL; move = move->next)
670
    {
671
      from = move->from;
672
      to = move->to;
673
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
674
        {
675
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
676
          for (n = i = 0; i < nregs; i++)
677
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
678
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
679
                    != ALLOCNO_REGNO (from)))
680
              n++;
681
          move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
682
          for (n = i = 0; i < nregs; i++)
683
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
684
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
685
                    != ALLOCNO_REGNO (from)))
686
              move->deps[n++] = hard_regno_last_set[hard_regno + i];
687
          move->deps_num = n;
688
        }
689
    }
690
  /* Topological sorting:  */
691
  VEC_truncate (move_t, move_vec, 0);
692
  for (move = list; move != NULL; move = move->next)
693
    traverse_moves (move);
694
  last = NULL;
695
  for (i = (int) VEC_length (move_t, move_vec) - 1; i >= 0; i--)
696
    {
697
      move = VEC_index (move_t, move_vec, i);
698
      move->next = NULL;
699
      if (last != NULL)
700
        last->next = move;
701
      last = move;
702
    }
703
  first = VEC_last (move_t, move_vec);
704
  /* Removing cycles:  */
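  /* A cycle arises when a move reads a hard register that an earlier
     move in the sequence sets.  For example, given p1(R1)->p2(R2)
     followed by p3(R2)->p4(R1), the first move clobbers R2 before the
     second move reads it.  We break such cycles by redirecting the
     earlier move to a new memory allocno M (p1(R1)->M) and appending
     a move M->p2(R2) after the sequence.  */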
705
  curr_tick++;
706
  VEC_truncate (move_t, move_vec, 0);
707
  for (move = first; move != NULL; move = move->next)
708
    {
709
      from = move->from;
710
      to = move->to;
711
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
712
        {
713
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
714
          for (i = 0; i < nregs; i++)
715
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
716
                && ALLOCNO_HARD_REGNO
717
                   (hard_regno_last_set[hard_regno + i]->to) >= 0)
718
              {
719
                set_move = hard_regno_last_set[hard_regno + i];
720
                /* It does not matter what loop_tree_node (of TO or
721
                   FROM) to use for the new allocno because of
722
                   subsequent IRA internal representation
723
                   flattening.  */
724
                new_allocno
725
                  = ira_create_allocno (ALLOCNO_REGNO (set_move->to), false,
726
                                        ALLOCNO_LOOP_TREE_NODE (set_move->to));
727
                ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
728
                ira_set_allocno_cover_class
729
                  (new_allocno, ALLOCNO_COVER_CLASS (set_move->to));
730
                ALLOCNO_ASSIGNED_P (new_allocno) = true;
731
                ALLOCNO_HARD_REGNO (new_allocno) = -1;
732
                ALLOCNO_REG (new_allocno)
733
                  = create_new_reg (ALLOCNO_REG (set_move->to));
734
                ALLOCNO_CONFLICT_ID (new_allocno) = ALLOCNO_NUM (new_allocno);
735
                /* Make it possibly conflicting with all earlier
                   created allocnos.  Cases where temporary allocnos
                   are created to remove the cycles are quite rare.  */
738
                ALLOCNO_MIN (new_allocno) = 0;
739
                ALLOCNO_MAX (new_allocno) = ira_allocnos_num - 1;
740
                new_move = create_move (set_move->to, new_allocno);
741
                set_move->to = new_allocno;
742
                VEC_safe_push (move_t, heap, move_vec, new_move);
743
                ira_move_loops_num++;
744
                if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
745
                  fprintf (ira_dump_file,
746
                           "    Creating temporary allocno a%dr%d\n",
747
                           ALLOCNO_NUM (new_allocno),
748
                           REGNO (ALLOCNO_REG (new_allocno)));
749
              }
750
        }
751
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
752
        continue;
753
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
754
      for (i = 0; i < nregs; i++)
755
        {
756
          hard_regno_last_set[hard_regno + i] = move;
757
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
758
        }
759
    }
760
  for (i = (int) VEC_length (move_t, move_vec) - 1; i >= 0; i--)
761
    {
762
      move = VEC_index (move_t, move_vec, i);
763
      move->next = NULL;
764
      last->next = move;
765
      last = move;
766
    }
767
  return first;
768
}
769
 
770
/* Generate RTX move insns from the move list LIST.  This updates
771
   allocation cost using move execution frequency FREQ.  */
772
static rtx
773
emit_move_list (move_t list, int freq)
774
{
775
  int cost;
776
  rtx result, insn;
777
  enum machine_mode mode;
778
  enum reg_class cover_class;
779
 
780
  start_sequence ();
781
  for (; list != NULL; list = list->next)
782
    {
783
      start_sequence ();
784
      emit_move_insn (ALLOCNO_REG (list->to), ALLOCNO_REG (list->from));
785
      list->insn = get_insns ();
786
      end_sequence ();
787
      /* The reload needs to have set up insn codes.  If the reload
788
         sets up insn codes by itself, it may fail because insns will
789
         have hard registers instead of pseudos and there may be no
790
         machine insn with given hard registers.  */
791
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
792
        recog_memoized (insn);
793
      emit_insn (list->insn);
794
      mode = ALLOCNO_MODE (list->to);
795
      cover_class = ALLOCNO_COVER_CLASS (list->to);
796
      cost = 0;
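      /* Account for the move in the IRA cost statistics: a move to
         memory is a store, a move from memory is a load, and a
         register-register move is a shuffle.  */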
797
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
798
        {
799
          if (ALLOCNO_HARD_REGNO (list->from) >= 0)
800
            {
801
              cost = ira_memory_move_cost[mode][cover_class][0] * freq;
802
              ira_store_cost += cost;
803
            }
804
        }
805
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
806
        {
807
          if (ALLOCNO_HARD_REGNO (list->to) >= 0)
808
            {
809
              cost = ira_memory_move_cost[mode][cover_class][0] * freq;
810
              ira_load_cost += cost;
811
            }
812
        }
813
      else
814
        {
815
          cost = (ira_get_register_move_cost (mode, cover_class, cover_class)
816
                  * freq);
817
          ira_shuffle_cost += cost;
818
        }
819
      ira_overall_cost += cost;
820
    }
821
  result = get_insns ();
822
  end_sequence ();
823
  return result;
824
}
825
 
826
/* Generate RTX move insns from move lists attached to basic blocks
827
   and edges.  */
828
static void
829
emit_moves (void)
830
{
831
  basic_block bb;
832
  edge_iterator ei;
833
  edge e;
834
  rtx insns, tmp;
835
 
836
  FOR_EACH_BB (bb)
837
    {
838
      if (at_bb_start[bb->index] != NULL)
839
        {
840
          at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
841
          insns = emit_move_list (at_bb_start[bb->index],
842
                                  REG_FREQ_FROM_BB (bb));
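          /* Place the moves after the block label and the basic block
             note (if any) but before the first real insn.  */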
843
          tmp = BB_HEAD (bb);
844
          if (LABEL_P (tmp))
845
            tmp = NEXT_INSN (tmp);
846
          if (NOTE_INSN_BASIC_BLOCK_P (tmp))
847
            tmp = NEXT_INSN (tmp);
848
          if (tmp == BB_HEAD (bb))
849
            emit_insn_before (insns, tmp);
850
          else if (tmp != NULL_RTX)
851
            emit_insn_after (insns, PREV_INSN (tmp));
852
          else
853
            emit_insn_after (insns, get_last_insn ());
854
        }
855
 
856
      if (at_bb_end[bb->index] != NULL)
857
        {
858
          at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
859
          insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
860
          ira_assert (! control_flow_insn_p (BB_END (bb)));
861
          emit_insn_after (insns, BB_END (bb));
862
        }
863
 
864
      FOR_EACH_EDGE (e, ei, bb->succs)
865
        {
866
          if (e->aux == NULL)
867
            continue;
868
          ira_assert ((e->flags & EDGE_ABNORMAL) == 0
869
                      || ! EDGE_CRITICAL_P (e));
870
          e->aux = modify_move_list ((move_t) e->aux);
871
          insert_insn_on_edge
872
            (emit_move_list ((move_t) e->aux,
873
                             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
874
             e);
875
          if (e->src->next_bb != e->dest)
876
            ira_additional_jumps_num++;
877
        }
878
    }
879
}
880
 
881
/* Update costs of A and corresponding allocnos on upper levels of the
882
   loop tree from reading (if READ_P) or writing A on an execution
883
   path with FREQ.  */
884
static void
885
update_costs (ira_allocno_t a, bool read_p, int freq)
886
{
887
  ira_loop_tree_node_t parent;
888
 
889
  for (;;)
890
    {
891
      ALLOCNO_NREFS (a)++;
892
      ALLOCNO_FREQ (a) += freq;
893
      ALLOCNO_MEMORY_COST (a)
894
        += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)]
895
            [read_p ? 1 : 0] * freq);
896
      if (ALLOCNO_CAP (a) != NULL)
897
        a = ALLOCNO_CAP (a);
898
      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
899
               || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
900
        break;
901
    }
902
}
903
 
904
/* Process moves from LIST with execution FREQ to add ranges, copies,
905
   and modify costs for allocnos involved in the moves.  All regnos
906
   living through the list are in LIVE_THROUGH, and the loop tree node
907
   used to find corresponding allocnos is NODE.  */
908
static void
909
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
910
                                     bitmap live_through, int freq)
911
{
912
  int start, n;
913
  unsigned int regno;
914
  move_t move;
915
  ira_allocno_t to, from, a;
916
  ira_copy_t cp;
917
  allocno_live_range_t r;
918
  bitmap_iterator bi;
919
  HARD_REG_SET hard_regs_live;
920
 
921
  if (list == NULL)
922
    return;
923
  n = 0;
924
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
925
    n++;
926
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
927
  /* This is a trick to guarantee that new ranges are not merged with
928
     the old ones.  */
929
  ira_max_point++;
930
  start = ira_max_point;
931
  for (move = list; move != NULL; move = move->next)
932
    {
933
      from = move->from;
934
      to = move->to;
935
      if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (to) == NULL)
936
        {
937
          if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
938
            fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
939
                     ALLOCNO_NUM (to), REGNO (ALLOCNO_REG (to)));
940
          ira_allocate_allocno_conflicts (to, n);
941
        }
942
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
943
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));
944
      IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (from), hard_regs_live);
945
      IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (to), hard_regs_live);
946
      IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (from),
947
                        hard_regs_live);
948
      IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (to), hard_regs_live);
949
      update_costs (from, true, freq);
950
      update_costs (to, false, freq);
951
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
952
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
953
        fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
954
                 cp->num, ALLOCNO_NUM (cp->first),
955
                 REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
956
                 REGNO (ALLOCNO_REG (cp->second)));
957
      r = ALLOCNO_LIVE_RANGES (from);
958
      if (r == NULL || r->finish >= 0)
959
        {
960
          ALLOCNO_LIVE_RANGES (from)
961
            = ira_create_allocno_live_range (from, start, ira_max_point, r);
962
          if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
963
            fprintf (ira_dump_file,
964
                     "    Adding range [%d..%d] to allocno a%dr%d\n",
965
                     start, ira_max_point, ALLOCNO_NUM (from),
966
                     REGNO (ALLOCNO_REG (from)));
967
        }
968
      else
969
        {
970
          r->finish = ira_max_point;
971
          if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
972
            fprintf (ira_dump_file,
973
                     "    Adding range [%d..%d] to allocno a%dr%d\n",
974
                     r->start, ira_max_point, ALLOCNO_NUM (from),
975
                     REGNO (ALLOCNO_REG (from)));
976
        }
977
      ira_max_point++;
978
      ALLOCNO_LIVE_RANGES (to)
979
        = ira_create_allocno_live_range (to, ira_max_point, -1,
980
                                         ALLOCNO_LIVE_RANGES (to));
981
      ira_max_point++;
982
    }
983
  for (move = list; move != NULL; move = move->next)
984
    {
985
      r = ALLOCNO_LIVE_RANGES (move->to);
986
      if (r->finish < 0)
987
        {
988
          r->finish = ira_max_point - 1;
989
          if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
990
            fprintf (ira_dump_file,
991
                     "    Adding range [%d..%d] to allocno a%dr%d\n",
992
                     r->start, r->finish, ALLOCNO_NUM (move->to),
993
                     REGNO (ALLOCNO_REG (move->to)));
994
        }
995
    }
996
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
997
    {
998
      a = node->regno_allocno_map[regno];
999
      if ((to = ALLOCNO_MEM_OPTIMIZED_DEST (a)) != NULL)
1000
        a = to;
1001
      ALLOCNO_LIVE_RANGES (a)
1002
        = ira_create_allocno_live_range (a, start, ira_max_point - 1,
1003
                                         ALLOCNO_LIVE_RANGES (a));
1004
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1005
        fprintf
1006
          (ira_dump_file,
1007
           "    Adding range [%d..%d] to live through %s allocno a%dr%d\n",
1008
           start, ira_max_point - 1,
1009
           to != NULL ? "upper level" : "",
1010
           ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
1011
    }
1012
}
1013
 
1014
/* Process all move lists to add ranges, conflicts, copies, and modify
1015
   costs for allocnos involved in the moves.  */
1016
static void
1017
add_ranges_and_copies (void)
1018
{
1019
  basic_block bb;
1020
  edge_iterator ei;
1021
  edge e;
1022
  ira_loop_tree_node_t node;
1023
  bitmap live_through;
1024
 
1025
  live_through = ira_allocate_bitmap ();
1026
  FOR_EACH_BB (bb)
1027
    {
1028
      /* It does not matter what loop_tree_node (of source or
1029
         destination block) to use for searching allocnos by their
1030
         regnos because of subsequent IR flattening.  */
1031
      node = IRA_BB_NODE (bb)->parent;
1032
      bitmap_copy (live_through, DF_LR_IN (bb));
1033
      add_range_and_copies_from_move_list
1034
        (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1035
      bitmap_copy (live_through, DF_LR_OUT (bb));
1036
      add_range_and_copies_from_move_list
1037
        (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1038
      FOR_EACH_EDGE (e, ei, bb->succs)
1039
        {
1040
          bitmap_and (live_through, DF_LR_IN (e->dest), DF_LR_OUT (bb));
1041
          add_range_and_copies_from_move_list
1042
            ((move_t) e->aux, node, live_through,
1043
             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1044
        }
1045
    }
1046
  ira_free_bitmap (live_through);
1047
}
1048
 
1049
/* The entry function.  It changes code and generates shuffling
   allocno moves on region borders for the regional register
   allocation (LOOPS_P is TRUE in this case).  */
1052
void
1053
ira_emit (bool loops_p)
1054
{
1055
  basic_block bb;
1056
  rtx insn;
1057
  edge_iterator ei;
1058
  edge e;
1059
  ira_allocno_t a;
1060
  ira_allocno_iterator ai;
1061
 
1062
  FOR_EACH_ALLOCNO (a, ai)
1063
    ALLOCNO_REG (a) = regno_reg_rtx[ALLOCNO_REGNO (a)];
1064
  if (! loops_p)
1065
    return;
1066
  at_bb_start = (move_t *) ira_allocate (sizeof (move_t) * last_basic_block);
1067
  memset (at_bb_start, 0, sizeof (move_t) * last_basic_block);
1068
  at_bb_end = (move_t *) ira_allocate (sizeof (move_t) * last_basic_block);
1069
  memset (at_bb_end, 0, sizeof (move_t) * last_basic_block);
1070
  local_allocno_bitmap = ira_allocate_bitmap ();
1071
  used_regno_bitmap = ira_allocate_bitmap ();
1072
  renamed_regno_bitmap = ira_allocate_bitmap ();
1073
  max_regno_before_changing = max_reg_num ();
1074
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
1075
  set_allocno_somewhere_renamed_p ();
1076
  ira_free_bitmap (used_regno_bitmap);
1077
  ira_free_bitmap (renamed_regno_bitmap);
1078
  ira_free_bitmap (local_allocno_bitmap);
1079
  setup_entered_from_non_parent_p ();
1080
  FOR_EACH_BB (bb)
1081
    {
1082
      at_bb_start[bb->index] = NULL;
1083
      at_bb_end[bb->index] = NULL;
1084
      FOR_EACH_EDGE (e, ei, bb->succs)
1085
        if (e->dest != EXIT_BLOCK_PTR)
1086
          generate_edge_moves (e);
1087
    }
1088
  allocno_last_set
1089
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
1090
  allocno_last_set_check
1091
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
1092
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
1093
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
1094
  curr_tick = 0;
1095
  FOR_EACH_BB (bb)
1096
    unify_moves (bb, true);
1097
  FOR_EACH_BB (bb)
1098
    unify_moves (bb, false);
1099
  move_vec = VEC_alloc (move_t, heap, ira_allocnos_num);
1100
  emit_moves ();
1101
  add_ranges_and_copies ();
1102
  /* Clean up: */
1103
  FOR_EACH_BB (bb)
1104
    {
1105
      free_move_list (at_bb_start[bb->index]);
1106
      free_move_list (at_bb_end[bb->index]);
1107
      FOR_EACH_EDGE (e, ei, bb->succs)
1108
        {
1109
          free_move_list ((move_t) e->aux);
1110
          e->aux = NULL;
1111
        }
1112
    }
1113
  VEC_free (move_t, heap, move_vec);
1114
  ira_free (allocno_last_set_check);
1115
  ira_free (allocno_last_set);
1116
  commit_edge_insertions ();
1117
  /* Fix insn codes.  It is necessary to do this before reload because
     reload assumes the initial insn codes are defined.  The insn
     codes can be invalidated by the CFG infrastructure, for example
     by jump redirection.  */
1121
  FOR_EACH_BB (bb)
1122
    FOR_BB_INSNS_REVERSE (bb, insn)
1123
      if (INSN_P (insn))
1124
        recog_memoized (insn);
1125
  ira_free (at_bb_end);
1126
  ira_free (at_bb_start);
1127
}
