OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [ira-build.c] - Blame information for rev 733

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 684 jeremybenn
/* Building internal representation for IRA.
2
   Copyright (C) 2006, 2007, 2008, 2009, 2010
3
   Free Software Foundation, Inc.
4
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5
 
6
This file is part of GCC.
7
 
8
GCC is free software; you can redistribute it and/or modify it under
9
the terms of the GNU General Public License as published by the Free
10
Software Foundation; either version 3, or (at your option) any later
11
version.
12
 
13
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14
WARRANTY; without even the implied warranty of MERCHANTABILITY or
15
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16
for more details.
17
 
18
You should have received a copy of the GNU General Public License
19
along with GCC; see the file COPYING3.  If not see
20
<http://www.gnu.org/licenses/>.  */
21
 
22
#include "config.h"
23
#include "system.h"
24
#include "coretypes.h"
25
#include "tm.h"
26
#include "rtl.h"
27
#include "tm_p.h"
28
#include "target.h"
29
#include "regs.h"
30
#include "flags.h"
31
#include "hard-reg-set.h"
32
#include "basic-block.h"
33
#include "insn-config.h"
34
#include "recog.h"
35
#include "diagnostic-core.h"
36
#include "params.h"
37
#include "df.h"
38
#include "output.h"
39
#include "reload.h"
40
#include "sparseset.h"
41
#include "ira-int.h"
42
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
43
 
44
/* Forward declaration: look for an existing copy connecting the two
   allocnos (defined later in this file).  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
                                     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
95
 
96
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  The node's regno -> allocno map is sized by
   the current max_reg_num and zeroed; the child/subloop links are
   cleared so the node can later be wired into the tree.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  /* One map slot per pseudo register; NULL means "no allocno for this
     regno in this region yet".  */
  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}
115
 
116
 
117
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds the all function) will be not allocated
   but nodes will still be allocated for basic blocks.  Loops entered
   or exited through abnormal critical edges are skipped (left with a
   NULL regno_allocno_map), because IRA cannot emit move insns on such
   edges.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;
  loop_p loop;

  /* One node per basic block; fields are cleared here and filled in by
     form_loop_tree.  */
  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
  /* Remember the BB count so finish_loop_tree_nodes frees exactly the
     nodes allocated here, even if splitting later adds blocks.  */
  last_basic_block_before_change = last_basic_block;
  for (i = 0; i < (unsigned int) last_basic_block; i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: a single node represents the whole
         function.  */
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * VEC_length (loop_p, ira_loops.larray)));
  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
    {
      if (loop != ira_loops.tree_root)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          /* Skip loops entered through an abnormal critical edge
             (other than the latch back edge).  */
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          /* Likewise for abnormal critical exit edges.  */
          edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edge, edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          VEC_free (edge, heap, edges);
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
184
 
185
/* Return TRUE if there is more than one allocation region, i.e. some
   loop other than the root got its own loop tree node (non-NULL
   regno_allocno_map).  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}
200
 
201
/* Free the loop tree node of a loop.  */
202
static void
203
finish_loop_tree_node (ira_loop_tree_node_t loop)
204
{
205
  if (loop->regno_allocno_map != NULL)
206
    {
207
      ira_assert (loop->bb == NULL);
208
      ira_free_bitmap (loop->local_copies);
209
      ira_free_bitmap (loop->border_allocnos);
210
      ira_free_bitmap (loop->modified_regnos);
211
      ira_free_bitmap (loop->all_allocnos);
212
      ira_free (loop->regno_allocno_map);
213
      loop->regno_allocno_map = NULL;
214
    }
215
}
216
 
217
/* Free the loop tree nodes: every loop node's resources, the loop
   node array itself, then each basic block node's resources and the
   BB node array.  Uses last_basic_block_before_change because blocks
   created after the nodes were allocated have no node.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops == NULL)
    finish_loop_tree_node (&ira_loop_nodes[0]);
  else
    FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
      finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      /* BB node fields are only conditionally allocated, so check each
         before freeing.  */
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
245
 
246
 
247
 
248
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we adding
   loop designating the whole function when CFG loops are not
   built.  Ancestors are added first so a parent node always exists
   (and is initialized) before its children are linked to it.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  /* regno_allocno_map != NULL means the loop was not skipped;
     children == NULL means it has not been linked into the tree
     yet (linking a node sets its parent's children).  */
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          /* Find the nearest enclosing loop that itself has a node
             (skipped loops are transparent).  */
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          /* This is the root node of the tree.  */
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          /* Push onto both the parent's general children list and its
             subloop-only list.  */
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}
299
 
300
/* The following recursive function sets up levels of nodes of the
301
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
302
   The function returns maximal value of level in the tree + 1.  */
303
static int
304
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
305
{
306
  int height, max_height;
307
  ira_loop_tree_node_t subloop_node;
308
 
309
  ira_assert (loop_node->bb == NULL);
310
  loop_node->level = level;
311
  max_height = level + 1;
312
  for (subloop_node = loop_node->subloops;
313
       subloop_node != NULL;
314
       subloop_node = subloop_node->subloop_next)
315
    {
316
      ira_assert (subloop_node->bb == NULL);
317
      height = setup_loop_tree_level (subloop_node, level + 1);
318
      if (height > max_height)
319
        max_height = height;
320
    }
321
  return max_height;
322
}
323
 
324
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  Also sets the globals
   ira_loop_tree_root and ira_loop_tree_height.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB (bb)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          /* Find the innermost enclosing loop that has a tree node
             (loops skipped in create_loop_tree_nodes have none).  */
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      /* Make sure the owning loop (and its ancestors) are in the tree,
         then chain the BB node under it.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
366
 
367
 
368
 
369
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  Needed after max_reg_num may have changed: the maps
   are reallocated to the new size and repopulated from the existing
   allocnos (caps excluded).  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Resize each live loop node's per-region map.  */
  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  /* Resize the global map, then re-thread every allocno onto its
     regno's list and into its region's map.  */
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}
412
 
413
 
414
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static VEC(ira_allocno_t,heap) *allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static VEC(ira_object_t,heap) *ira_object_id_map_vec;
424
 
425
/* Initialize data concerning allocnos: the allocation pools, the
   growable vectors backing ira_allocnos / ira_object_id_map, and the
   global regno -> allocno map.  Called once per IRA run before any
   allocno is created.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
                         sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  /* Reserve about two slots per pseudo as an initial size hint.  */
  allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec
    = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
448
 
449
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index within A for multi-object allocnos.  The
   object gets the next conflict id and is registered in
   ira_object_id_map.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  /* Start with the unallocatable registers plus everything outside the
     allocno's class as hard-register conflicts.  */
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  /* MIN > MAX encodes an empty conflict-id range.  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
  /* Re-read the base address: the push may have reallocated.  */
  ira_object_id_map
    = VEC_address (ira_object_t, ira_object_id_map_vec);
  ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);

  return obj;
}
479
 
480
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  All cost and state fields are reset
   to their neutral values; the caller sets class, objects etc.
   afterwards.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Thread onto the global per-regno list and the region map.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.c).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;  /* -1 = no hard register assigned.  */
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
  /* Re-read the base address: the push may have reallocated.  */
  ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
  ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);

  return a;
}
539
 
540
/* Set up register class for A and update its conflict hard
541
   registers.  */
542
void
543
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
544
{
545
  ira_allocno_object_iterator oi;
546
  ira_object_t obj;
547
 
548
  ALLOCNO_CLASS (a) = aclass;
549
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
550
    {
551
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
552
                              reg_class_contents[aclass]);
553
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
554
                              reg_class_contents[aclass]);
555
    }
556
}
557
 
558
/* Determine the number of objects we should associate with allocno A
   and allocate them.  An allocno gets two objects only when its mode
   spans exactly two words and its class needs exactly two registers
   for that mode; otherwise it gets a single object.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  enum machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
575
 
576
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
588
 
589
/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.
   Both allocnos must have the same number of objects; objects are
   matched by subword index.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
                          OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
                        OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  /* Propagate the stack-register prohibitions alongside the sets.  */
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
616
 
617
/* Update hard register conflict information for all objects associated with
618
   A to include the regs in SET.  */
619
void
620
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
621
{
622
  ira_allocno_object_iterator i;
623
  ira_object_t obj;
624
 
625
  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
626
    {
627
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
628
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
629
    }
630
}
631
 
632
/* Return TRUE if a conflict vector with NUM elements is more
633
   profitable than a conflict bit vector for OBJ.  */
634
bool
635
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
636
{
637
  int nw;
638
  int max = OBJECT_MAX (obj);
639
  int min = OBJECT_MIN (obj);
640
 
641
  if (max < min)
642
    /* We prefer a bit vector in such case because it does not result
643
       in allocation.  */
644
    return false;
645
 
646
  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
647
  return (2 * sizeof (ira_object_t) * (num + 1)
648
          < 3 * nw * sizeof (IRA_INT_TYPE));
649
}
650
 
651
/* Allocates and initialize the conflict vector of OBJ for NUM
652
   conflicting objects.  */
653
void
654
ira_allocate_conflict_vec (ira_object_t obj, int num)
655
{
656
  int size;
657
  ira_object_t *vec;
658
 
659
  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
660
  num++; /* for NULL end marker  */
661
  size = sizeof (ira_object_t) * num;
662
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
663
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
664
  vec[0] = NULL;
665
  OBJECT_NUM_CONFLICTS (obj) = 0;
666
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
667
  OBJECT_CONFLICT_VEC_P (obj) = true;
668
}
669
 
670
/* Allocate and initialize the conflict bit vector of OBJ.  The vector
   covers conflict ids from OBJECT_MIN to OBJECT_MAX inclusive and is
   zeroed; OBJ is marked as using the bit-vector representation.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  /* Round the id range up to whole IRA_INT_TYPE words.  */
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
684
 
685
/* Allocate and initialize the conflict vector or conflict bit vector
686
   of OBJ for NUM conflicting allocnos whatever is more profitable.  */
687
void
688
ira_allocate_object_conflicts (ira_object_t obj, int num)
689
{
690
  if (ira_conflict_vector_profitable_p (obj, num))
691
    ira_allocate_conflict_vec (obj, num);
692
  else
693
    allocate_conflict_bit_vec (obj);
694
}
695
 
696
/* Add OBJ2 to the conflicts of OBJ1.  */
697
static void
698
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
699
{
700
  int num;
701
  unsigned int size;
702
 
703
  if (OBJECT_CONFLICT_VEC_P (obj1))
704
    {
705
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
706
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);
707
      num = curr_num + 2;
708
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
709
        {
710
          ira_object_t *newvec;
711
          size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
712
          newvec = (ira_object_t *) ira_allocate (size);
713
          memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
714
          ira_free (vec);
715
          vec = newvec;
716
          OBJECT_CONFLICT_ARRAY (obj1) = vec;
717
          OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
718
        }
719
      vec[num - 2] = obj2;
720
      vec[num - 1] = NULL;
721
      OBJECT_NUM_CONFLICTS (obj1)++;
722
    }
723
  else
724
    {
725
      int nw, added_head_nw, id;
726
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
727
 
728
      id = OBJECT_CONFLICT_ID (obj2);
729
      if (OBJECT_MIN (obj1) > id)
730
        {
731
          /* Expand head of the bit vector.  */
732
          added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
733
          nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
734
          size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
735
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
736
            {
737
              memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
738
                       vec, nw * sizeof (IRA_INT_TYPE));
739
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
740
            }
741
          else
742
            {
743
              size
744
                = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
745
              vec = (IRA_INT_TYPE *) ira_allocate (size);
746
              memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
747
                      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
748
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
749
              memset ((char *) vec
750
                      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
751
                      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
752
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
753
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
754
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
755
            }
756
          OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
757
        }
758
      else if (OBJECT_MAX (obj1) < id)
759
        {
760
          nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
761
          size = nw * sizeof (IRA_INT_TYPE);
762
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
763
            {
764
              /* Expand tail of the bit vector.  */
765
              size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
766
              vec = (IRA_INT_TYPE *) ira_allocate (size);
767
              memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
768
              memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
769
                      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
770
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
771
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
772
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
773
            }
774
          OBJECT_MAX (obj1) = id;
775
        }
776
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
777
    }
778
}
779
 
780
/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
781
static void
782
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
783
{
784
  add_to_conflicts (obj1, obj2);
785
  add_to_conflicts (obj2, obj1);
786
}
787
 
788
/* Clear all conflicts of OBJ without releasing its conflict storage:
   empty the vector (or zero the bit vector) in place so it can be
   refilled.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      /* The vector is NULL-terminated; truncate it.  */
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      /* Zero only the words actually covering [MIN, MAX].  */
      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
805
 
806
/* The array used to find duplications in conflict vectors of
   allocnos.  Indexed by conflict id; holds the tick at which the id
   was last seen.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  Bumped once per vector so the array never
   needs re-clearing.  */
static int curr_conflict_check_tick;
813
 
814
/* Remove duplications in conflict vector of OBJ, compacting the
   surviving entries in place and restoring the NULL terminator.
   Uses the tick trick: an id marked with the current tick has already
   been kept once.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
836
 
837
/* Remove duplications in conflict vectors of all allocnos.  */
838
static void
839
compress_conflict_vecs (void)
840
{
841
  ira_object_t obj;
842
  ira_object_iterator oi;
843
 
844
  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
845
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
846
  curr_conflict_check_tick = 0;
847
  FOR_EACH_OBJECT (obj, oi)
848
    {
849
      if (OBJECT_CONFLICT_VEC_P (obj))
850
        compress_conflict_vec (obj);
851
    }
852
  ira_free (conflict_check);
853
}
854
 
855
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  Output goes to ira_dump_file in the
   form "a<num>(r<regno>,b<bb>|l<loop>[:<member>...])".  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  /* A node is either a basic-block node (bb != NULL) or a loop node.  */
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      /* Recurse into the capped allocno.  */
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
874
 
875
/* Create and return the cap representing allocno A in the
   parent loop.  The cap inherits A's mode, class, costs, reference
   statistics and hard-register conflicts, and the two are linked via
   ALLOCNO_CAP / ALLOCNO_CAP_MEMBER.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  /* cap_p = true: caps do not go into the regno -> allocno maps.  */
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
915
 
916
/* Create and return a live range for OBJECT with given attributes.  */
917
live_range_t
918
ira_create_live_range (ira_object_t obj, int start, int finish,
919
                       live_range_t next)
920
{
921
  live_range_t p;
922
 
923
  p = (live_range_t) pool_alloc (live_range_pool);
924
  p->object = obj;
925
  p->start = start;
926
  p->finish = finish;
927
  p->next = next;
928
  return p;
929
}
930
 
931
/* Create a new live range for OBJECT and queue it at the head of its
932
   live range list.  */
933
void
934
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
935
{
936
  live_range_t p;
937
  p = ira_create_live_range (object, start, finish,
938
                             OBJECT_LIVE_RANGES (object));
939
  OBJECT_LIVE_RANGES (object) = p;
940
}
941
 
942
/* Copy allocno live range R and return the result.  Note the copy
   shares R's next pointer; callers are expected to relink it.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  /* Structure copy: object, start, finish and next all duplicated.  */
  *p = *r;
  return p;
}
952
 
953
/* Copy allocno live range list given by its head R and return the
954
   result.  */
955
live_range_t
956
ira_copy_live_range_list (live_range_t r)
957
{
958
  live_range_t p, first, last;
959
 
960
  if (r == NULL)
961
    return NULL;
962
  for (first = last = NULL; r != NULL; r = r->next)
963
    {
964
      p = copy_live_range (r);
965
      if (first == NULL)
966
        first = p;
967
      else
968
        last->next = p;
969
      last = p;
970
    }
971
  return first;
972
}
973
 
974
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  The merge is destructive: both input lists are
   consumed, and when two ranges overlap or are adjacent one of them
   is coalesced into the other and the redundant node is freed.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant that r1 is the list whose head range
         starts no earlier than r2's head range.  */
      if (r1->start < r2->start)
        {
          temp = r1;
          r1 = r2;
          r2 = temp;
        }
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  The "+ 1"
             also coalesces ranges that merely touch.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2->next;
              r2->next = NULL;
            }
        }
    }
  /* Append whatever remains of either list; the loop above
     guarantees at most a single unconsumed node here.  */
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1051
 
1052
/* Return TRUE if live ranges R1 and R2 intersect.  */
1053
bool
1054
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1055
{
1056
  /* Remember the live ranges are always kept ordered.  */
1057
  while (r1 != NULL && r2 != NULL)
1058
    {
1059
      if (r1->start > r2->finish)
1060
        r1 = r1->next;
1061
      else if (r2->start > r1->finish)
1062
        r2 = r2->next;
1063
      else
1064
        return true;
1065
    }
1066
  return false;
1067
}
1068
 
1069
/* Free allocno live range R back to the live range pool.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1075
 
1076
/* Free list of allocno live ranges starting with R.  */
1077
void
1078
ira_finish_live_range_list (live_range_t r)
1079
{
1080
  live_range_t next_r;
1081
 
1082
  for (; r != NULL; r = next_r)
1083
    {
1084
      next_r = r->next;
1085
      ira_finish_live_range (r);
1086
    }
1087
}
1088
 
1089
/* Free updated register costs of allocno A and reset the pointers to
   NULL so the vectors can be lazily reallocated later.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
1104
 
1105
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  Despite the name this also tears down A's objects: their live
   ranges, conflict arrays, id-map slots and pool storage, plus A's
   own slot in ira_allocnos.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      /* Clear the global id map so no stale pointer survives.  */
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
        ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
1138
 
1139
/* Free the memory allocated for allocno A: its cost vectors, its
   objects, and finally the allocno record itself.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
1146
 
1147
/* Free the memory allocated for all allocnos, then release the
   side tables and the underlying allocation pools.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  VEC_free (ira_object_t, heap, ira_object_id_map_vec);
  VEC_free (ira_allocno_t, heap, allocno_vec);
  /* Pools go last: individual records were returned to them above.  */
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
1163
 
1164
 
1165
 
1166
/* Pool from which struct ira_allocno_copy records are allocated.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static VEC(ira_copy_t,heap) *copy_vec;
1172
 
1173
/* The function initializes data concerning allocno copies: the copy
   pool, the copy vector, and the exported ira_copies array/count.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  /* Reserve one slot per insn uid as an initial size estimate.  */
  copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
1183
 
1184
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any, or NULL if there is no such copy.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
                   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  /* Walk A1's copy list; each copy is threaded on two lists, so pick
     the next link according to which end of the copy A1 is.  */
  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a1)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
          && cp->loop_tree_node == loop_tree_node)
        return cp;
    }
  return NULL;
}
1213
 
1214
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  The copy is registered in
   copy_vec and the exported ira_copies array is refreshed.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                 bool constraint_p, rtx insn,
                 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
  /* The push may reallocate the vector, so re-fetch its address.  */
  ira_copies = VEC_address (ira_copy_t, copy_vec);
  ira_copies_num = VEC_length (ira_copy_t, copy_vec);
  return cp;
}
1236
 
1237
/* Attach a copy CP to allocnos involved into the copy.  Each copy is
   an element of two intrusive doubly-linked lists at once -- one per
   end allocno -- so the old list heads' back pointers must be fixed
   through whichever end matches.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      /* The old head may hold FIRST at either of its two ends.  */
      if (cp->next_first_allocno_copy->first == first)
        cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
        cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
        cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
        cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1264
 
1265
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  Both the end allocnos
   and the corresponding prev/next list links are swapped together.  */
void
ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  ira_allocno_t temp;
  ira_copy_t temp_cp;

  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  temp = cp->first;
  cp->first = cp->second;
  cp->second = temp;

  temp_cp = cp->prev_first_allocno_copy;
  cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
  cp->prev_second_allocno_copy = temp_cp;

  temp_cp = cp->next_first_allocno_copy;
  cp->next_first_allocno_copy = cp->next_second_allocno_copy;
  cp->next_second_allocno_copy = temp_cp;
}
1288
 
1289
/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                      bool constraint_p, rtx insn,
                      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  /* An existing identical copy just accumulates the frequency.  */
  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
                        loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  ira_add_allocno_copy_to_list (cp);
  /* Canonicalize so cp->first has the smaller allocno number.  */
  ira_swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
1312
 
1313
/* Print info about copy CP into file F.  The trailing tag records
   the copy's origin: a real move insn, a constraint, or a shuffle.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
           ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
           ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
           cp->insn != NULL
           ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
1323
 
1324
/* Print info about copy CP into stderr.  Debugger convenience
   wrapper around print_copy.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
1330
 
1331
/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
1341
 
1342
/* Print info about all copies into stderr.  Debugger convenience
   wrapper around print_copies.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
1348
 
1349
/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  /* Walk A's copy list, following the link for whichever end of each
     copy A occupies (same traversal as find_allocno_copy).  */
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
               ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
1376
 
1377
/* Print info about copies involving allocno A into stderr.
   Debugger convenience wrapper around print_allocno_copies.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
1383
 
1384
/* The function frees memory allocated for copy CP by returning it to
   the copy pool.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
1390
 
1391
 
1392
/* Free memory allocated for all copies, then release the copy vector
   and the underlying pool.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  VEC_free (ira_copy_t, heap, copy_vec);
  free_alloc_pool (copy_pool);
}
1404
 
1405
 
1406
 
1407
/* Pools for cost vectors.  Indexed by reg_class, but only the
   entries for allocno classes are ever initialized and used.  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
1409
 
1410
/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class; a vector holds one
   int per hard register available in the class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass]
        = create_alloc_pool ("cost vectors",
                             sizeof (int) * ira_class_hard_regs_num[aclass],
                             100);
    }
}
1427
 
1428
/* Allocate and return a cost vector for ACLASS.  The vector is
   uninitialized and must be freed with ira_free_cost_vector.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
1434
 
1435
/* Free a cost vector VEC previously allocated for ACLASS.  VEC must
   not be NULL.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
1442
 
1443
/* Finish work with hard register cost vectors.  Release allocation
   pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      free_alloc_pool (cost_vector_pool[aclass]);
    }
}
1457
 
1458
 
1459
 
1460
/* The current loop tree node and its regno allocno map.  Both are
   maintained by ira_traverse_loop_tree for its callbacks.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
1463
 
1464
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
                        void (*preorder_func) (ira_loop_tree_node_t),
                        void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  /* Only loop nodes may be traversal roots; BB nodes are leaves.  */
  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    /* BB children run both callbacks immediately, before any
       subloop is descended into.  */
    for (subloop_node = loop_node->children;
         subloop_node != NULL;
         subloop_node = subloop_node->next)
      if (subloop_node->bb != NULL)
        {
          if (preorder_func != NULL)
            (*preorder_func) (subloop_node);

          if (postorder_func != NULL)
            (*postorder_func) (subloop_node);
        }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
                              preorder_func, postorder_func);
    }

  /* The recursion clobbered the current-node globals; restore them
     before invoking the postorder callback for this node.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1512
 
1513
 
1514
 
1515
/* The basic block currently being processed.  Set by
   create_bb_allocnos and read by create_insn_allocnos.  */
static basic_block curr_bb;
1517
 
1518
/* This recursive function creates allocnos corresponding to
   pseudo-registers containing in X.  True OUTPUT_P means that X is
   a lvalue.  Besides creating allocnos it accumulates reference
   counts, frequencies and the loop's modified_regnos bitmap.  */
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      /* Hard registers are not represented by allocnos.  */
      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
        {
          ira_allocno_t a;

          if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
            a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

          ALLOCNO_NREFS (a)++;
          ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
          if (output_p)
            bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
        }
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      /* A register inside a memory address is always a use.  */
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
           code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  /* Generic case: recurse into all rtx sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
1580
 
1581
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be a allocno living through from one subloop to
     another: such a pseudo is live into BB without being referenced
     by any of its insns, so create its allocno from DF live-in.  */
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
1603
 
1604
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo counts only if it is live out of E's source
   AND live into E's destination.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = DF_LR_IN (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
                             FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
        if (ira_curr_regno_allocno_map[i] == NULL)
          {
            /* The order of creations is important for right
               ira_regno_allocno_map.  */
            if ((parent = ira_curr_loop_tree_node->parent) != NULL
                && parent->regno_allocno_map[i] == NULL)
              ira_create_allocno (i, false, parent);
            ira_create_allocno (i, false, ira_curr_loop_tree_node);
          }
        bitmap_set_bit (border_allocnos,
                        ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
1634
 
1635
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  BB nodes are
   handled per-insn; loop nodes are handled via their entry edges
   (excluding the latch back edge) and exit edges.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      VEC (edge, heap) *edges;

      ira_assert (current_loops != NULL);
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch)
          create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
        create_loop_allocnos (e);
      VEC_free (edge, heap, edges);
    }
}
1661
 
1662
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent by OR-ing the modified_regnos
   bitmaps.  The root has no parent and is skipped.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
                   loop_tree_node->modified_regnos);
}
1673
 
1674
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* Only regional allocation modes need the propagation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
          && (parent_a = parent->regno_allocno_map[i]) != NULL
          /* There are no caps yet at this point.  So use
             border_allocnos to find allocnos for the propagation.  */
          && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
                           ALLOCNO_NUM (a)))
        {
          /* BAD_SPILL_P propagates as a logical AND: the parent stays
             bad only if every nested allocno is bad.  */
          if (! ALLOCNO_BAD_SPILL_P (a))
            ALLOCNO_BAD_SPILL_P (parent_a) = false;
          ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
          ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
          ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
          merge_hard_reg_conflicts (a, parent_a, true);
          ALLOCNO_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CALLS_CROSSED_NUM (a);
          ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
            += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
          aclass = ALLOCNO_CLASS (a);
          ira_assert (aclass == ALLOCNO_CLASS (parent_a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
             ALLOCNO_HARD_REG_COSTS (a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
             aclass,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
          ALLOCNO_CLASS_COST (parent_a)
            += ALLOCNO_CLASS_COST (a);
          ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
        }
}
1726
 
1727
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
                          create_loop_tree_node_allocnos, NULL);
  /* The modified-regnos propagation matters only when optimizing.  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
                            propagate_modified_regnos);
}
1740
 
1741
 
1742
 
1743
/* The page contains function to remove some regions from a separate
1744
   register allocation.  We remove regions whose separate allocation
1745
   will hardly improve the result.  As a result we speed up regional
1746
   register allocation.  */
1747
 
1748
/* The function changes the object in range list given by R to OBJ.  */
1749
static void
1750
change_object_in_range_list (live_range_t r, ira_object_t obj)
1751
{
1752
  for (; r != NULL; r = r->next)
1753
    r->object = obj;
1754
}
1755
 
1756
/* Move all live ranges associated with allocno FROM to allocno TO.
   FROM's objects are left with empty range lists; overlapping ranges
   are merged into TO's existing lists.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  /* Objects correspond pairwise, so counts must match.  */
  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file,
                   "      Moving ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      /* Retarget the ranges before merging so they refer to TO.  */
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
1785
 
1786
/* Copy all live ranges associated with allocno FROM into allocno TO,
   leaving FROM's lists intact (contrast with move_allocno_live_ranges).  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      /* Duplicate first: the merge below consumes its first argument.  */
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
1813
 
1814
/* Return TRUE if NODE represents a loop with low register
   pressure, i.e. no pressure class exceeds its available hard
   registers (classes with at most one available register are
   ignored).  BB nodes never qualify.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (node->reg_pressure[pclass] > ira_available_class_regs[pclass]
          && ira_available_class_regs[pclass] > 1)
        return false;
    }
  return true;
}
1834
 
1835
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  bool res;
  VEC (edge, heap) *edges;

  /* An EH edge into the loop header makes the region complex.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  /* get_loop_exit_edges allocates the vector; it must be freed on
     every path, so record the result instead of returning from
     inside the walk (the original early return leaked EDGES).  */
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edge, edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
        res = true;
        break;
      }
  VEC_free (edge, heap, edges);
  return res;
}
#endif
1857
 
1858
/* Sort loops for marking them for removal.  We put already marked
1859
   loops first, then less frequent loops next, and then outer loops
1860
   next.  */
1861
static int
1862
loop_compare_func (const void *v1p, const void *v2p)
1863
{
1864
  int diff;
1865
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
1866
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
1867
 
1868
  ira_assert (l1->parent != NULL && l2->parent != NULL);
1869
  if (l1->to_remove_p && ! l2->to_remove_p)
1870
    return -1;
1871
  if (! l1->to_remove_p && l2->to_remove_p)
1872
    return 1;
1873
  if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
1874
    return diff;
1875
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
1876
    return diff;
1877
  /* Make sorting stable.  */
1878
  return l1->loop_num - l2->loop_num;
1879
}
1880
 
1881
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
   exit or enter edges are removed too because the allocation might
   require put pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  /* Scratch array big enough for every loop of the function.  */
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
                                             * VEC_length (loop_p,
                                                           ira_loops.larray));
  for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
        if (ira_loop_nodes[i].parent == NULL)
          {
            /* Don't remove the root.  */
            ira_loop_nodes[i].to_remove_p = false;
            continue;
          }
        sorted_loops[n++] = &ira_loop_nodes[i];
        /* Mark for removal when both the loop and its parent have
           low register pressure, or (with stack regs) when the loop
           has complex enter/exit edges.  */
        ira_loop_nodes[i].to_remove_p
          = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
              && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
             || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
             );
      }
  /* Sort so that already-marked and cheapest loops come first; the
     trimming loop below then removes the least profitable regions.  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  /* Keep at most IRA_MAX_LOOPS_NUM regions (the root counts as one,
     hence the "+ 1").  */
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
        fprintf
          (ira_dump_file,
           "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
           sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
           sorted_loops[i]->loop->header->frequency,
           loop_depth (sorted_loops[i]->loop),
           low_pressure_loop_node_p (sorted_loops[i]->parent)
           && low_pressure_loop_node_p (sorted_loops[i])
           ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
1940
 
1941
/* Mark all loops but root for removing.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
    /* Skip loop slots without an allocno map.  */
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
        if (ira_loop_nodes[i].parent == NULL)
          {
            /* Don't remove the root.  */
            ira_loop_nodes[i].to_remove_p = false;
            continue;
          }
        ira_loop_nodes[i].to_remove_p = true;
        if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
          fprintf
            (ira_dump_file,
             "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
             ira_loop_nodes[i].loop_num,
             ira_loop_nodes[i].loop->header->index,
             ira_loop_nodes[i].loop->header->frequency,
             loop_depth (ira_loop_nodes[i].loop));
      }
}
1969
 
1970
/* Definition of vector of loop tree nodes.  */
DEF_VEC_P(ira_loop_tree_node_t);
DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);

/* Vec containing references to all removed loop tree nodes; filled
   by remove_uneccesary_loop_nodes_from_loop_tree and drained in
   remove_unnecessary_regions.  */
static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;

/* Vec containing references to all children of loop tree nodes; used
   as a work stack by remove_uneccesary_loop_nodes_from_loop_tree.  */
static VEC(ira_loop_tree_node_t,heap) *children_vec;
1979
 
1980
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
  /* Remember where this node's surviving children begin on the
     children_vec work stack.  */
  start = VEC_length (ira_loop_tree_node_t, children_vec);
  /* Recurse into subloop nodes; basic-block children are kept
     as-is.  */
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* This node goes away; its surviving children stay on
         children_vec and will be adopted by the nearest kept
         ancestor.  */
      VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
      return;
    }
  /* Re-link everything pushed above START as children of this node,
     rebuilding both the children and subloops chains.  */
  while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
    {
      subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
        {
          subnode->subloop_next = node->subloops;
          node->subloops = subnode;
        }
    }
}
2017
 
2018
/* Return TRUE if NODE is inside PARENT.  */
2019
static bool
2020
loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2021
{
2022
  for (node = node->parent; node != NULL; node = node->parent)
2023
    if (node == parent)
2024
      return true;
2025
  return false;
2026
}
2027
 
2028
/* Sort allocnos according to their order in regno allocno list.  */
2029
static int
2030
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2031
{
2032
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2033
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2034
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2035
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2036
 
2037
  if (loop_is_inside_p (n1, n2))
2038
    return -1;
2039
  else if (loop_is_inside_p (n2, n1))
2040
    return 1;
2041
  /* If allocnos are equally good, sort by allocno numbers, so that
2042
     the results of qsort leave nothing to chance.  We put allocnos
2043
     with higher number first in the list because it is the original
2044
     order for allocnos from loops on the same levels.  */
2045
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2046
}
2047
 
2048
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  Allocated lazily in
   remove_unnecessary_allocnos and consumed by
   ira_rebuild_regno_allocno_list.  */
static ira_allocno_t *regno_allocnos;
2051
 
2052
/* Restore allocno order for REGNO in the regno allocno list.  */
2053
static void
2054
ira_rebuild_regno_allocno_list (int regno)
2055
{
2056
  int i, n;
2057
  ira_allocno_t a;
2058
 
2059
  for (n = 0, a = ira_regno_allocno_map[regno];
2060
       a != NULL;
2061
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2062
    regno_allocnos[n++] = a;
2063
  ira_assert (n > 0);
2064
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2065
         regno_allocno_order_compare_func);
2066
  for (i = 1; i < n; i++)
2067
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2068
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2069
  ira_regno_allocno_map[regno] = regno_allocnos[0];
2070
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2071
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2072
}
2073
 
2074
/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  /* Accumulate reference and frequency statistics.  */
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* A remains a bad spill candidate only if every merged allocno
     was one.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  /* Accumulate the per-hard-register and conflict cost vectors.  */
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
                                     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
                                     aclass,
                                     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
2099
 
2100
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
           a != NULL;
           a = next_a)
        {
          next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
          a_node = ALLOCNO_LOOP_TREE_NODE (a);
          if (! a_node->to_remove_p)
            prev_a = a;
          else
            {
              /* Find the nearest ancestor node that either has an
                 allocno for REGNO or is itself not being removed.  */
              for (parent = a_node->parent;
                   (parent_a = parent->regno_allocno_map[regno]) == NULL
                     && parent->to_remove_p;
                   parent = parent->parent)
                ;
              if (parent_a == NULL)
                {
                  /* There are no allocnos with the same regno in
                     upper region -- just move the allocno to the
                     upper region.  */
                  prev_a = a;
                  ALLOCNO_LOOP_TREE_NODE (a) = parent;
                  parent->regno_allocno_map[regno] = a;
                  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
                  /* The move may break the regno allocno list
                     order.  */
                  rebuild_p = true;
                }
              else
                {
                  /* Remove the allocno and update info of allocno in
                     the upper region.  */
                  if (prev_a == NULL)
                    ira_regno_allocno_map[regno] = next_a;
                  else
                    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
                  move_allocno_live_ranges (a, parent_a);
                  merged_p = true;
                  propagate_some_info_from_allocno (parent_a, a);
                  /* Remove it from the corresponding regno allocno
                     map to avoid info propagation of subsequent
                     allocno into this already removed allocno.  */
                  a_node->regno_allocno_map[regno] = NULL;
                  finish_allocno (a);
                }
            }
        }
      if (rebuild_p)
        /* We need to restore the order in regno allocno list.  */
        {
          if (regno_allocnos == NULL)
            regno_allocnos
              = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                                * ira_allocnos_num);
          ira_rebuild_regno_allocno_list (regno);
        }
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2175
 
2176
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  /* Pass 1: promote or merge every non-cap allocno of an inner
     region into the root region.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
        continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
        {
          /* No allocno for REGNO at the top level yet -- promote
             this one.  */
          ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
          ira_loop_tree_root->regno_allocno_map[regno] = a;
          continue;
        }
      /* Propagate info only when the parent region lacks an allocno
         for REGNO.  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
         region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
        propagate_some_info_from_allocno (top_a, a);
    }
  /* Pass 2 (consistency checks only): every remaining non-root
     allocno is either a cap member or has a parent allocno.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
        continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
        ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  /* Pass 3: keep only the top-level allocno of each regno and finish
     all the others.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
        {
          ira_object_t obj;
          ira_allocno_object_iterator oi;

          ira_regno_allocno_map[regno] = a;
          ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
          ALLOCNO_CAP_MEMBER (a) = NULL;
          /* With one region left, the total conflict set becomes the
             conflict set of the surviving allocno's objects.  */
          FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
            COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                               OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
          if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
            ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
        }
      else
        finish_allocno (a);
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
2244
 
2245
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  /* Reserve room for one entry per basic block plus one per loop.  */
  children_vec
    = VEC_alloc (ira_loop_tree_node_t, heap,
                 last_basic_block + VEC_length (loop_p, ira_loops.larray));
  removed_loop_vec
    = VEC_alloc (ira_loop_tree_node_t, heap,
                 last_basic_block + VEC_length (loop_p, ira_loops.larray));
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  VEC_free (ira_loop_tree_node_t, heap, children_vec);
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  /* Finish the loop tree nodes collected during the rebuild.  */
  while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
    finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
  VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
}
2275
 
2276
 
2277
 
2278
/* At this point true value of allocno attribute bad_spill_p means
   that there is an insn where allocno occurs and where the allocno
   can not be used as memory.  The function updates the attribute, now
   it can be true only for allocnos which can not be used as memory in
   an insn and in whose live ranges there is other allocno deaths.
   Spilling allocnos with true value will not improve the code because
   it will not make other allocnos colorable and additional reloads
   for the corresponding pseudo will be generated in reload pass for
   each insn it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  /* For each allocno class, collect the set of program points where
     some object of that class dies (live-range finish points).  */
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
        continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
        for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
          bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  /* Clear bad_spill_p for an allocno when another same-class allocno
     dies strictly inside one of its ranges.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
        continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
        continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
        {
          for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
            {
              /* Look for a death point strictly inside [start,
                 finish).  */
              for (i = r->start + 1; i < r->finish; i++)
                if (bitmap_bit_p (&dead_points[aclass], i))
                  break;
              if (i < r->finish)
                break;
            }
          /* R != NULL means the inner search found a death point.  */
          if (r != NULL)
            {
              ALLOCNO_BAD_SPILL_P (a) = false;
              break;
            }
        }
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
2347
 
2348
 
2349
 
2350
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  /* Initialize OBJECT_MIN/OBJECT_MAX from each object's own range
     list: MAX from the head range's finish, MIN from the tail
     range's start (the list appears to be kept in decreasing order
     -- TODO confirm against range construction).  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          r = OBJECT_LIVE_RANGES (obj);
          if (r == NULL)
            continue;
          OBJECT_MAX (obj) = r->finish;
          for (; r->next != NULL; r = r->next)
            ;
          OBJECT_MIN (obj) = r->start;
        }
    }
  /* Propagate min/max upward: to caps and to the parent region's
     allocno for the same regno.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
        int j;
        int n = ALLOCNO_NUM_OBJECTS (a);

        for (j = 0; j < n; j++)
          {
            ira_object_t obj = ALLOCNO_OBJECT (a, j);
            ira_object_t parent_obj;

            /* Objects with no initialized range are skipped.  */
            if (OBJECT_MAX (obj) < 0)
              continue;
            ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
            /* Accumulation of range info.  */
            if (ALLOCNO_CAP (a) != NULL)
              {
                /* Widen the min/max of the whole cap chain.  */
                for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
                  {
                    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
                    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
                      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
                    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
                      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
                  }
                continue;
              }
            if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
              continue;
            parent_a = parent->regno_allocno_map[i];
            parent_obj = ALLOCNO_OBJECT (parent_a, j);
            if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
              OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
            if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
              OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
          }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Verify every object's min/max lies within [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
          && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
        continue;
      gcc_unreachable ();
    }
#endif
}
2429
 
2430
/* Sort allocnos according to their live ranges.  Allocnos with
2431
   smaller allocno class are put first unless we use priority
2432
   coloring.  Allocnos with the same class are ordered according
2433
   their start (min).  Allocnos with the same start are ordered
2434
   according their finish (max).  */
2435
static int
2436
object_range_compare_func (const void *v1p, const void *v2p)
2437
{
2438
  int diff;
2439
  ira_object_t obj1 = *(const ira_object_t *) v1p;
2440
  ira_object_t obj2 = *(const ira_object_t *) v2p;
2441
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2442
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2443
 
2444
  if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2445
    return diff;
2446
  if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2447
     return diff;
2448
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2449
}
2450
 
2451
/* Sort ira_object_id_map and set up conflict id of allocnos.  */
2452
static void
2453
sort_conflict_id_map (void)
2454
{
2455
  int i, num;
2456
  ira_allocno_t a;
2457
  ira_allocno_iterator ai;
2458
 
2459
  num = 0;
2460
  FOR_EACH_ALLOCNO (a, ai)
2461
    {
2462
      ira_allocno_object_iterator oi;
2463
      ira_object_t obj;
2464
 
2465
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2466
        ira_object_id_map[num++] = obj;
2467
    }
2468
  qsort (ira_object_id_map, num, sizeof (ira_object_t),
2469
         object_range_compare_func);
2470
  for (i = 0; i < num; i++)
2471
    {
2472
      ira_object_t obj = ira_object_id_map[i];
2473
 
2474
      gcc_assert (obj != NULL);
2475
      OBJECT_CONFLICT_ID (obj) = i;
2476
    }
2477
  for (i = num; i < ira_objects_num; i++)
2478
    ira_object_id_map[i] = NULL;
2479
}
2480
 
2481
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  /* Forward pass over objects (sorted by range start, see
     sort_conflict_id_map): compute the minimal conflicting id for
     each object.  */
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
        continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
        {
          /* First object: initialize the scan state.  */
          aclass = ALLOCNO_CLASS (a);
          min = i;
          first_not_finished = i;
        }
      else
        {
          start = OBJECT_MIN (obj);
          /* If we skip an allocno, the allocno with smaller ids will
             be also skipped because of the secondary sorting the
             range finishes (see function
             object_range_compare_func).  */
          while (first_not_finished < i
                 && start > OBJECT_MAX (ira_object_id_map
                                        [first_not_finished]))
            first_not_finished++;
          min = first_not_finished;
        }
      if (min == i)
        /* We could increase min further in this case but it is good
           enough.  */
        min++;
      /* Remember the real range start; OBJECT_MIN is reused from
         here on to hold the conflict-id lower bound.  */
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  /* Backward pass: compute the maximal conflicting id; LAST_LIVED
     maps a program point to the smallest object index whose range
     covers it.  */
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
        continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
        {
          aclass = ALLOCNO_CLASS (a);
          for (j = 0; j < ira_max_point; j++)
            last_lived[j] = -1;
          filled_area_start = ira_max_point;
        }
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
        /* We could decrease max further in this case but it is good
           enough.  */
        max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go further A range finish to recognize
         intersection quickly because if the finish of subsequently
         processed allocno (it has smaller conflict id) range is
         further A range finish than they are definitely intersected
         (the reason for this is the allocnos with bigger conflict id
         have their range starts not smaller than allocnos with
         smaller ids.  */
      for (j = min; j < filled_area_start; j++)
        last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra conflicts in
     subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  /* First collect the extreme conflict ids over all word-0
     subobjects of multi-object allocnos...  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
        continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
        word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
        word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  /* ... then widen every such subobject's min/max to that span.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
        continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
        OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
        OBJECT_MAX (obj0) = word0_max;
    }
}
2605
 
2606
 
2607
 
2608
static void
2609
create_caps (void)
2610
{
2611
  ira_allocno_t a;
2612
  ira_allocno_iterator ai;
2613
  ira_loop_tree_node_t loop_tree_node;
2614
 
2615
  FOR_EACH_ALLOCNO (a, ai)
2616
    {
2617
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2618
        continue;
2619
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
2620
        create_cap_allocno (a);
2621
      else if (ALLOCNO_CAP (a) == NULL)
2622
        {
2623
          loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2624
          if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2625
            create_cap_allocno (a);
2626
        }
2627
    }
2628
}
2629
 
2630
 
2631
 
2632
/* The page contains code transforming the internal representation
   with more than one region (IR) into a one-region IR, which is
   necessary for reload.  This transformation is called IR
   flattening.  We might just rebuild the IR for one region but we
   don't do it because it takes a lot of time.  */
2637
 
2638
/* Map: regno -> allocnos which will finally represent the regno for
   IR with one region.  Used throughout the IR-flattening code
   below.  */
static ira_allocno_t *regno_top_level_allocno_map;
2641
 
2642
/* Find the allocno that corresponds to A at a level one higher up in the
2643
   loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
2644
ira_allocno_t
2645
ira_parent_allocno (ira_allocno_t a)
2646
{
2647
  ira_loop_tree_node_t parent;
2648
 
2649
  if (ALLOCNO_CAP (a) != NULL)
2650
    return NULL;
2651
 
2652
  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2653
  if (parent == NULL)
2654
    return NULL;
2655
 
2656
  return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2657
}
2658
 
2659
/* Find the allocno that corresponds to A at a level one higher up in the
2660
   loop tree.  If ALLOCNO_CAP is set for A, return that.  */
2661
ira_allocno_t
2662
ira_parent_or_cap_allocno (ira_allocno_t a)
2663
{
2664
  if (ALLOCNO_CAP (a) != NULL)
2665
    return ALLOCNO_CAP (a);
2666
 
2667
  return ira_parent_allocno (a);
2668
}
2669
 
2670
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
        /* This allocno will be removed.  */
        continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up the loop tree until we hit either a regno slot with
         no allocno or a surviving allocno that is an optimized-away
         store destination.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
           parent != NULL;
           parent = parent->parent)
        if ((parent_a = parent->regno_allocno_map[regno]) == NULL
            || (parent_a
                == regno_top_level_allocno_map[REGNO
                                               (allocno_emit_reg (parent_a))]
                && ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
          break;
      if (parent == NULL || parent_a == NULL)
        continue;

      /* Make PARENT_A's live ranges and conflicts cover A's.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      /* Accumulate call-related statistics as well.  */
      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
        += ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
        += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
2718
 
2719
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
          max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  /* After flattening the "total" conflict info is just the local
     conflict info, so start by copying it for every real allocno.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps and they never
           will be transformed into allocnos existing after IR
           flattening.  */
        continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
        COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                           OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
           a != NULL;
           a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
        {
          ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

          ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
          if (data->somewhere_renamed_p)
            new_pseudos_p = true;
          parent_a = ira_parent_allocno (a);
          if (parent_a == NULL)
            {
              /* Root-level allocno: it represents the pseudo after
                 flattening.  */
              ALLOCNO_COPIES (a) = NULL;
              regno_top_level_allocno_map[REGNO (data->reg)] = a;
              continue;
            }
          ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

          if (data->mem_optimized_dest != NULL)
            mem_dest_p = true;
          parent_data = ALLOCNO_EMIT_DATA (parent_a);
          if (REGNO (data->reg) == REGNO (parent_data->reg))
            {
              /* Same pseudo in parent and child: merge the child into
                 its parent, the child disappears.  */
              merge_hard_reg_conflicts (a, parent_a, true);
              move_allocno_live_ranges (a, parent_a);
              merged_p = true;
              parent_data->mem_optimized_dest_p
                = (parent_data->mem_optimized_dest_p
                   || data->mem_optimized_dest_p);
              continue;
            }
          /* The child got a new pseudo; it survives as a top-level
             allocno.  Subtract its statistics and costs from every
             ancestor, which had accumulated them during propagation.  */
          new_pseudos_p = true;
          for (;;)
            {
              ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
              ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
              ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
              ALLOCNO_CALLS_CROSSED_NUM (parent_a)
                -= ALLOCNO_CALLS_CROSSED_NUM (a);
              ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
                -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
              ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
                          && ALLOCNO_NREFS (parent_a) >= 0
                          && ALLOCNO_FREQ (parent_a) >= 0);
              aclass = ALLOCNO_CLASS (parent_a);
              hard_regs_num = ira_class_hard_regs_num[aclass];
              if (ALLOCNO_HARD_REG_COSTS (a) != NULL
                  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
                for (j = 0; j < hard_regs_num; j++)
                  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
                    -= ALLOCNO_HARD_REG_COSTS (a)[j];
              if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
                  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
                for (j = 0; j < hard_regs_num; j++)
                  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
                    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
              ALLOCNO_CLASS_COST (parent_a)
                -= ALLOCNO_CLASS_COST (a);
              ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
              parent_a = ira_parent_allocno (parent_a);
              if (parent_a == NULL)
                break;
            }
          ALLOCNO_COPIES (a) = NULL;
          regno_top_level_allocno_map[REGNO (data->reg)] = a;
        }
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
        merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
        {
          ira_allocno_object_iterator oi;
          ira_object_t obj;

          if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
              || ALLOCNO_CAP_MEMBER (a) != NULL)
            continue;
          FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
            {
              for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
                ira_assert (r->object == obj);
              clear_conflicts (obj);
            }
        }
      /* Sweep program points in order, keeping a set of currently live
         objects and recording pairwise conflicts among them.  */
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
        {
          for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
            {
              ira_object_t obj = r->object;

              a = OBJECT_ALLOCNO (obj);
              if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
                  || ALLOCNO_CAP_MEMBER (a) != NULL)
                continue;

              aclass = ALLOCNO_CLASS (a);
              sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
              EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
                {
                  ira_object_t live_obj = ira_object_id_map[n];
                  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
                  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

                  if (ira_reg_classes_intersect_p[aclass][live_aclass]
                      /* Don't set up conflict for the allocno with itself.  */
                      && live_a != a)
                    ira_add_conflict (obj, live_obj);
                }
            }

          for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
            sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
        }
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest of
     the copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
          || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
        {
          if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
            fprintf
              (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
               cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
               ALLOCNO_NUM (cp->first),
               REGNO (allocno_emit_reg (cp->first)),
               ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
               ALLOCNO_NUM (cp->second),
               REGNO (allocno_emit_reg (cp->second)));
          cp->loop_tree_node = NULL;
          continue;
        }
      first
        = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
        = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
        keep_p = true; /* The copy was generated in ira-emit.c.  */
      else
        {
          /* Check that the copy was not propagated from a level on
             which we will have different pseudos.  */
          node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
          node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
          keep_p = ((REGNO (allocno_emit_reg (first))
                     == REGNO (allocno_emit_reg (node_first)))
                     && (REGNO (allocno_emit_reg (second))
                         == REGNO (allocno_emit_reg (node_second))));
        }
      if (keep_p)
        {
          /* Retarget the copy at the surviving top-level allocnos.  */
          cp->loop_tree_node = ira_loop_tree_root;
          cp->first = first;
          cp->second = second;
        }
      else
        {
          cp->loop_tree_node = NULL;
          if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
                     cp->num, ALLOCNO_NUM (cp->first),
                     REGNO (allocno_emit_reg (cp->first)),
                     ALLOCNO_NUM (cp->second),
                     REGNO (allocno_emit_reg (cp->second)));
        }
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
          || ALLOCNO_CAP_MEMBER (a) != NULL)
        {
          if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "      Remove a%dr%d\n",
                     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
          finish_allocno (a);
          continue;
        }
      /* The survivor now belongs to the single root region.  */
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
        ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
        {
          ira_copies[cp->num] = NULL;
          finish_copy (cp);
          continue;
        }
      ira_assert
        (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
         && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      ira_add_allocno_copy_to_list (cp);
      ira_swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
2990
 
2991
 
2992
 
2993
#ifdef ENABLE_IRA_CHECKING
2994
/* Check creation of all allocnos.  Allocnos on lower levels should
2995
   have allocnos or caps on all upper levels.  */
2996
static void
2997
check_allocno_creation (void)
2998
{
2999
  ira_allocno_t a;
3000
  ira_allocno_iterator ai;
3001
  ira_loop_tree_node_t loop_tree_node;
3002
 
3003
  FOR_EACH_ALLOCNO (a, ai)
3004
    {
3005
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3006
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3007
                                ALLOCNO_NUM (a)));
3008
      if (loop_tree_node == ira_loop_tree_root)
3009
        continue;
3010
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
3011
        ira_assert (ALLOCNO_CAP (a) != NULL);
3012
      else if (ALLOCNO_CAP (a) == NULL)
3013
        ira_assert (loop_tree_node->parent
3014
                    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3015
                    && bitmap_bit_p (loop_tree_node->border_allocnos,
3016
                                     ALLOCNO_NUM (a)));
3017
    }
3018
}
3019
#endif
3020
 
3021
/* Identify allocnos which prefer a register class with a single hard register.
3022
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3023
   less likely to use the preferred singleton register.  */
3024
static void
3025
update_conflict_hard_reg_costs (void)
3026
{
3027
  ira_allocno_t a;
3028
  ira_allocno_iterator ai;
3029
  int i, index, min;
3030
 
3031
  FOR_EACH_ALLOCNO (a, ai)
3032
    {
3033
      reg_class_t aclass = ALLOCNO_CLASS (a);
3034
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3035
 
3036
      if (reg_class_size[(int) pref] != 1)
3037
        continue;
3038
      index = ira_class_hard_reg_index[(int) aclass]
3039
                                      [ira_class_hard_regs[(int) pref][0]];
3040
      if (index < 0)
3041
        continue;
3042
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3043
          || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3044
        continue;
3045
      min = INT_MAX;
3046
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3047
        if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3048
            && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3049
          min = ALLOCNO_HARD_REG_COSTS (a)[i];
3050
      if (min == INT_MAX)
3051
        continue;
3052
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3053
                                  aclass, 0);
3054
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3055
        -= min - ALLOCNO_CLASS_COST (a);
3056
    }
3057
}
3058
 
3059
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: push allocno info up the loop tree and
         build caps for border allocnos.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
        {
          remove_unnecessary_regions (true);
          loops_p = false;
        }
      /* We don't save hard registers around calls for fast allocation
         -- add caller clobbered registers as conflicting ones to
         allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
        if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
          ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      /* Dump summary statistics: conflict count N, live range count
         NR, and the number NR_BIG of multi-object allocnos.  */
      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
        {
          int j, nobj = ALLOCNO_NUM_OBJECTS (a);

          if (nobj > 1)
            nr_big++;
          for (j = 0; j < nobj; j++)
            {
              ira_object_t obj = ALLOCNO_OBJECT (a, j);
              n += OBJECT_NUM_CONFLICTS (obj);
              for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
                nr++;
            }
        }
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
               current_loops == NULL ? 1 : VEC_length (loop_p, ira_loops.larray),
               n_basic_blocks, ira_max_point);
      fprintf (ira_dump_file,
               "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
               ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
3150
 
3151
/* Release the data created by function ira_build.  Teardown order
   mirrors the build: loop tree, copies, allocnos, cost vectors,
   and finally the live range points.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.