/* Instruction scheduling pass.  Selective scheduler and pipeliner.
2
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
3
 
4
This file is part of GCC.
5
 
6
GCC is free software; you can redistribute it and/or modify it under
7
the terms of the GNU General Public License as published by the Free
8
Software Foundation; either version 3, or (at your option) any later
9
version.
10
 
11
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12
WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14
for more details.
15
 
16
You should have received a copy of the GNU General Public License
17
along with GCC; see the file COPYING3.  If not see
18
<http://www.gnu.org/licenses/>.  */
19
 
20
#include "config.h"
21
#include "system.h"
22
#include "coretypes.h"
23
#include "tm.h"
24
#include "toplev.h"
25
#include "rtl.h"
26
#include "tm_p.h"
27
#include "hard-reg-set.h"
28
#include "regs.h"
29
#include "function.h"
30
#include "flags.h"
31
#include "insn-config.h"
32
#include "insn-attr.h"
33
#include "except.h"
34
#include "toplev.h"
35
#include "recog.h"
36
#include "params.h"
37
#include "target.h"
38
#include "timevar.h"
39
#include "tree-pass.h"
40
#include "sched-int.h"
41
#include "ggc.h"
42
#include "tree.h"
43
#include "vec.h"
44
#include "langhooks.h"
45
#include "rtlhooks-def.h"
46
 
47
#ifdef INSN_SCHEDULING
48
#include "sel-sched-ir.h"
49
/* We don't have to use it except for sel_print_insn.  */
50
#include "sel-sched-dump.h"
51
 
52
/* A vector holding bb info for whole scheduling pass.  */
53
VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
54
 
55
/* A vector holding bb info.  */
56
VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
57
 
58
/* A pool for allocating all lists.  */
59
alloc_pool sched_lists_pool;
60
 
61
/* This contains information about successors for compute_av_set.  */
62
struct succs_info current_succs;
63
 
64
/* Data structure to describe interaction with the generic scheduler utils.  */
65
static struct common_sched_info_def sel_common_sched_info;
66
 
67
/* The loop nest being pipelined.  */
68
struct loop *current_loop_nest;
69
 
70
/* LOOP_NESTS is a vector containing the corresponding loop nest for
71
   each region.  */
72
static VEC(loop_p, heap) *loop_nests = NULL;
73
 
74
/* Saves blocks already in loop regions, indexed by bb->index.  */
75
static sbitmap bbs_in_loop_rgns = NULL;
76
 
77
/* CFG hooks that are saved before changing create_basic_block hook.  */
78
static struct cfg_hooks orig_cfg_hooks;
79
 
80
 
81
/* Array containing reverse topological index of function basic blocks,
82
   indexed by BB->INDEX.  */
83
static int *rev_top_order_index = NULL;
84
 
85
/* Length of the above array.  */
86
static int rev_top_order_index_len = -1;
87
 
88
/* A regset pool structure.  */
89
static struct
90
{
91
  /* The stack to which regsets are returned.  */
92
  regset *v;
93
 
94
  /* Its stack pointer, i.e. the number of regsets currently on the stack.  */
  int n;
96
 
97
  /* Its size.  */
98
  int s;
99
 
100
  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was
     returned back to the pool.  */
103
  regset *vv;
104
 
105
  /* The stack pointer of VV.  */
  int nn;
107
 
108
  /* Its size.  */
109
  int ss;
110
 
111
  /* The difference between allocated and returned regsets.  */
112
  int diff;
113
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
114
 
115
/* This represents the nop pool.  */
116
static struct
117
{
118
  /* The vector which holds previously emitted nops.  */
119
  insn_t *v;
120
 
121
  /* Its pointer.  */
122
  int n;
123
 
124
  /* Its size.  */
125
  int s;
126
} nop_pool = { NULL, 0, 0 };
127
 
128
/* The pool for basic block notes.  */
129
static rtx_vec_t bb_note_pool;
130
 
131
/* A NOP pattern used to emit placeholder insns.  */
132
rtx nop_pattern = NULL_RTX;
133
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
135
rtx exit_insn = NULL_RTX;
136
 
137
/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
139
bool preheader_removed = false;
140
 
141
 
142
/* Forward static declarations.  */
143
static void fence_clear (fence_t);
144
 
145
static void deps_init_id (idata_t, insn_t, bool);
146
static void init_id_from_df (idata_t, insn_t, bool);
147
static expr_t set_insn_init (expr_t, vinsn_t, int);
148
 
149
static void cfg_preds (basic_block, insn_t **, int *);
150
static void prepare_insn_expr (insn_t, int);
151
static void free_history_vect (VEC (expr_history_def, heap) **);
152
 
153
static void move_bb_info (basic_block, basic_block);
154
static void remove_empty_bb (basic_block, bool);
155
static void sel_remove_loop_preheader (void);
156
 
157
static bool insn_is_the_only_one_in_bb_p (insn_t);
158
static void create_initial_data_sets (basic_block);
159
 
160
static void free_av_set (basic_block);
161
static void invalidate_av_set (basic_block);
162
static void extend_insn_data (void);
163
static void sel_init_new_insn (insn_t, int);
164
static void finish_insns (void);
165
 
166
/* Various list functions.  */
167
 
168
/* Copy an instruction list L.  */
169
ilist_t
170
ilist_copy (ilist_t l)
171
{
172
  ilist_t head = NULL, *tailp = &head;
173
 
174
  while (l)
175
    {
176
      ilist_add (tailp, ILIST_INSN (l));
177
      tailp = &ILIST_NEXT (*tailp);
178
      l = ILIST_NEXT (l);
179
    }
180
 
181
  return head;
182
}
183
 
184
/* Invert an instruction list L.  */
185
ilist_t
186
ilist_invert (ilist_t l)
187
{
188
  ilist_t res = NULL;
189
 
190
  while (l)
191
    {
192
      ilist_add (&res, ILIST_INSN (l));
193
      l = ILIST_NEXT (l);
194
    }
195
 
196
  return res;
197
}
198
 
199
/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
200
void
201
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
202
{
203
  bnd_t bnd;
204
 
205
  _list_add (lp);
206
  bnd = BLIST_BND (*lp);
207
 
208
  BND_TO (bnd) = to;
209
  BND_PTR (bnd) = ptr;
210
  BND_AV (bnd) = NULL;
211
  BND_AV1 (bnd) = NULL;
212
  BND_DC (bnd) = dc;
213
}
214
 
215
/* Remove the boundary pointed to by LP from the boundary list.  */
216
void
217
blist_remove (blist_t *lp)
218
{
219
  bnd_t b = BLIST_BND (*lp);
220
 
221
  av_set_clear (&BND_AV (b));
222
  av_set_clear (&BND_AV1 (b));
223
  ilist_clear (&BND_PTR (b));
224
 
225
  _list_remove (lp);
226
}
227
 
228
/* Init a fence tail L.  */
229
void
230
flist_tail_init (flist_tail_t l)
231
{
232
  FLIST_TAIL_HEAD (l) = NULL;
233
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
234
}
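
/* Note that the tail pointer of an flist_tail always points at the
   terminating NULL link of the list.  This lets a new fence be appended in
   constant time: after flist_add (FLIST_TAIL_TAILP (t), ...), the tail
   pointer is advanced to &FLIST_NEXT (*FLIST_TAIL_TAILP (t)) (see
   add_to_fences for this idiom).  */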
235
 
236
/* Try to find fence corresponding to INSN in L.  */
237
fence_t
238
flist_lookup (flist_t l, insn_t insn)
239
{
240
  while (l)
241
    {
242
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
243
        return FLIST_FENCE (l);
244
 
245
      l = FLIST_NEXT (l);
246
    }
247
 
248
  return NULL;
249
}
250
 
251
/* Init the fields of F before running fill_insns.  */
252
static void
253
init_fence_for_scheduling (fence_t f)
254
{
255
  FENCE_BNDS (f) = NULL;
256
  FENCE_PROCESSED_P (f) = false;
257
  FENCE_SCHEDULED_P (f) = false;
258
}
259
 
260
/* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
261
static void
262
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
263
           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
264
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
265
           int cycle, int cycle_issued_insns, int issue_more,
266
           bool starts_cycle_p, bool after_stall_p)
267
{
268
  fence_t f;
269
 
270
  _list_add (lp);
271
  f = FLIST_FENCE (*lp);
272
 
273
  FENCE_INSN (f) = insn;
274
 
275
  gcc_assert (state != NULL);
276
  FENCE_STATE (f) = state;
277
 
278
  FENCE_CYCLE (f) = cycle;
279
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
280
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
281
  FENCE_AFTER_STALL_P (f) = after_stall_p;
282
 
283
  gcc_assert (dc != NULL);
284
  FENCE_DC (f) = dc;
285
 
286
  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
287
  FENCE_TC (f) = tc;
288
 
289
  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
290
  FENCE_ISSUE_MORE (f) = issue_more;
291
  FENCE_EXECUTING_INSNS (f) = executing_insns;
292
  FENCE_READY_TICKS (f) = ready_ticks;
293
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
294
  FENCE_SCHED_NEXT (f) = sched_next;
295
 
296
  init_fence_for_scheduling (f);
297
}
298
 
299
/* Remove the head node of the list pointed to by LP.  */
300
static void
301
flist_remove (flist_t *lp)
302
{
303
  if (FENCE_INSN (FLIST_FENCE (*lp)))
304
    fence_clear (FLIST_FENCE (*lp));
305
  _list_remove (lp);
306
}
307
 
308
/* Clear the fence list pointed to by LP.  */
309
void
310
flist_clear (flist_t *lp)
311
{
312
  while (*lp)
313
    flist_remove (lp);
314
}
315
 
316
/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
317
void
318
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
319
{
320
  def_t d;
321
 
322
  _list_add (dl);
323
  d = DEF_LIST_DEF (*dl);
324
 
325
  d->orig_insn = original_insn;
326
  d->crosses_call = crosses_call;
327
}
328
 
329
 
330
/* Functions to work with target contexts.  */
331
 
332
/* Bulk target context.  It is convenient for debugging purposes to ensure
333
   that there are no uninitialized (null) target contexts.  */
334
static tc_t bulk_tc = (tc_t) 1;
335
 
336
/* Target hooks wrappers.  In the future we can provide some default
337
   implementations for them.  */
338
 
339
/* Allocate a store for the target context.  */
340
static tc_t
341
alloc_target_context (void)
342
{
343
  return (targetm.sched.alloc_sched_context
344
          ? targetm.sched.alloc_sched_context () : bulk_tc);
345
}
346
 
347
/* Init target context TC.
   If CLEAN_P is true, then initialize TC as if at the beginning of the
   scheduler pass.  Otherwise, copy the current backend context to TC.  */
350
static void
351
init_target_context (tc_t tc, bool clean_p)
352
{
353
  if (targetm.sched.init_sched_context)
354
    targetm.sched.init_sched_context (tc, clean_p);
355
}
356
 
357
/* Allocate and initialize a target context.  Meaning of CLEAN_P is the same
   as in init_target_context ().  */
359
tc_t
360
create_target_context (bool clean_p)
361
{
362
  tc_t tc = alloc_target_context ();
363
 
364
  init_target_context (tc, clean_p);
365
  return tc;
366
}
367
 
368
/* Copy TC to the current backend context.  */
369
void
370
set_target_context (tc_t tc)
371
{
372
  if (targetm.sched.set_sched_context)
373
    targetm.sched.set_sched_context (tc);
374
}
375
 
376
/* TC is about to be destroyed.  Free any internal data.  */
377
static void
378
clear_target_context (tc_t tc)
379
{
380
  if (targetm.sched.clear_sched_context)
381
    targetm.sched.clear_sched_context (tc);
382
}
383
 
384
/* Clear and free target context TC.  */
385
static void
386
delete_target_context (tc_t tc)
387
{
388
  clear_target_context (tc);
389
 
390
  if (targetm.sched.free_sched_context)
391
    targetm.sched.free_sched_context (tc);
392
}
393
 
394
/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
396
static void
397
copy_target_context (tc_t to, tc_t from)
398
{
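  /* There is no direct target hook for copying a context, so we save the
     current backend context in TMP, temporarily install FROM as the current
     context, initialize TO from it, and finally restore the original backend
     context from TMP.  */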
399
  tc_t tmp = create_target_context (false);
400
 
401
  set_target_context (from);
402
  init_target_context (to, false);
403
 
404
  set_target_context (tmp);
405
  delete_target_context (tmp);
406
}
407
 
408
/* Create a copy of TC.  */
409
static tc_t
410
create_copy_of_target_context (tc_t tc)
411
{
412
  tc_t copy = alloc_target_context ();
413
 
414
  copy_target_context (copy, tc);
415
 
416
  return copy;
417
}
418
 
419
/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
420
   is the same as in init_target_context ().  */
421
void
422
reset_target_context (tc_t tc, bool clean_p)
423
{
424
  clear_target_context (tc);
425
  init_target_context (tc, clean_p);
426
}
427
 
428
/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns to
   decide whether the current insn is dependent on the processed ones.  */
432
 
433
/* Make a copy of FROM in TO.  */
434
static void
435
copy_deps_context (deps_t to, deps_t from)
436
{
437
  init_deps (to, false);
438
  deps_join (to, from);
439
}
440
 
441
/* Allocate store for dep context.  */
442
static deps_t
443
alloc_deps_context (void)
444
{
445
  return XNEW (struct deps_desc);
446
}
447
 
448
/* Allocate and initialize dep context.  */
449
static deps_t
450
create_deps_context (void)
451
{
452
  deps_t dc = alloc_deps_context ();
453
 
454
  init_deps (dc, false);
455
  return dc;
456
}
457
 
458
/* Create a copy of FROM.  */
459
static deps_t
460
create_copy_of_deps_context (deps_t from)
461
{
462
  deps_t to = alloc_deps_context ();
463
 
464
  copy_deps_context (to, from);
465
  return to;
466
}
467
 
468
/* Clean up internal data of DC.  */
469
static void
470
clear_deps_context (deps_t dc)
471
{
472
  free_deps (dc);
473
}
474
 
475
/* Clear and free DC.  */
476
static void
477
delete_deps_context (deps_t dc)
478
{
479
  clear_deps_context (dc);
480
  free (dc);
481
}
482
 
483
/* Clear and init DC.  */
484
static void
485
reset_deps_context (deps_t dc)
486
{
487
  clear_deps_context (dc);
488
  init_deps (dc, false);
489
}
490
 
491
/* This structure describes the dependence analysis hooks for advancing
492
   dependence context.  */
493
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
494
  {
495
    NULL,
496
 
497
    NULL, /* start_insn */
498
    NULL, /* finish_insn */
499
    NULL, /* start_lhs */
500
    NULL, /* finish_lhs */
501
    NULL, /* start_rhs */
502
    NULL, /* finish_rhs */
503
    haifa_note_reg_set,
504
    haifa_note_reg_clobber,
505
    haifa_note_reg_use,
506
    NULL, /* note_mem_dep */
507
    NULL, /* note_dep */
508
 
509
    0, 0, 0
510
  };
511
 
512
/* Process INSN and add its impact on DC.  */
513
void
514
advance_deps_context (deps_t dc, insn_t insn)
515
{
516
  sched_deps_info = &advance_deps_context_sched_deps_info;
517
  deps_analyze_insn (dc, insn);
518
}
519
 
520
 
521
/* Functions to work with DFA states.  */
522
 
523
/* Allocate store for a DFA state.  */
524
static state_t
525
state_alloc (void)
526
{
527
  return xmalloc (dfa_state_size);
528
}
529
 
530
/* Allocate and initialize DFA state.  */
531
static state_t
532
state_create (void)
533
{
534
  state_t state = state_alloc ();
535
 
536
  state_reset (state);
537
  advance_state (state);
538
  return state;
539
}
540
 
541
/* Free DFA state.  */
542
static void
543
state_free (state_t state)
544
{
545
  free (state);
546
}
547
 
548
/* Make a copy of FROM in TO.  */
549
static void
550
state_copy (state_t to, state_t from)
551
{
552
  memcpy (to, from, dfa_state_size);
553
}
554
 
555
/* Create a copy of FROM.  */
556
static state_t
557
state_create_copy (state_t from)
558
{
559
  state_t to = state_alloc ();
560
 
561
  state_copy (to, from);
562
  return to;
563
}
564
 
565
 
566
/* Functions to work with fences.  */
567
 
568
/* Clear the fence.  */
569
static void
570
fence_clear (fence_t f)
571
{
572
  state_t s = FENCE_STATE (f);
573
  deps_t dc = FENCE_DC (f);
574
  void *tc = FENCE_TC (f);
575
 
576
  ilist_clear (&FENCE_BNDS (f));
577
 
578
  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
579
              || (s == NULL && dc == NULL && tc == NULL));
580
 
581
  if (s != NULL)
582
    free (s);
583
 
584
  if (dc != NULL)
585
    delete_deps_context (dc);
586
 
587
  if (tc != NULL)
588
    delete_target_context (tc);
589
  VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
590
  free (FENCE_READY_TICKS (f));
591
  FENCE_READY_TICKS (f) = NULL;
592
}
593
 
594
/* Init a list of fences with successors of OLD_FENCE.  */
595
void
596
init_fences (insn_t old_fence)
597
{
598
  insn_t succ;
599
  succ_iterator si;
600
  bool first = true;
601
  int ready_ticks_size = get_max_uid () + 1;
602
 
603
  FOR_EACH_SUCC_1 (succ, si, old_fence,
604
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
605
    {
606
 
607
      if (first)
608
        first = false;
609
      else
610
        gcc_assert (flag_sel_sched_pipelining_outer_loops);
611
 
612
      flist_add (&fences, succ,
613
                 state_create (),
614
                 create_deps_context () /* dc */,
615
                 create_target_context (true) /* tc */,
616
                 NULL_RTX /* last_scheduled_insn */,
617
                 NULL, /* executing_insns */
618
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
619
                 ready_ticks_size,
620
                 NULL_RTX /* sched_next */,
621
                 1 /* cycle */, 0 /* cycle_issued_insns */,
622
                 issue_rate, /* issue_more */
623
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
624
    }
625
}
626
 
627
/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.
632
 
633
   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
634
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
635
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
636
static void
637
merge_fences (fence_t f, insn_t insn,
638
              state_t state, deps_t dc, void *tc,
639
              rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
640
              int *ready_ticks, int ready_ticks_size,
641
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
642
{
643
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
644
 
645
  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
646
              && !sched_next && !FENCE_SCHED_NEXT (f));
647
 
648
  /* Check if we can decide from which path the fences came.
     If we can't (or don't want to), reset everything.  */
650
  if (last_scheduled_insn == NULL
651
      || last_scheduled_insn_old == NULL
652
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         going right through it; in such a case just reset everything).  */
656
      || last_scheduled_insn == last_scheduled_insn_old)
657
    {
658
      state_reset (FENCE_STATE (f));
659
      state_free (state);
660
 
661
      reset_deps_context (FENCE_DC (f));
662
      delete_deps_context (dc);
663
 
664
      reset_target_context (FENCE_TC (f), true);
665
      delete_target_context (tc);
666
 
667
      if (cycle > FENCE_CYCLE (f))
668
        FENCE_CYCLE (f) = cycle;
669
 
670
      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
671
      FENCE_ISSUE_MORE (f) = issue_rate;
672
      VEC_free (rtx, gc, executing_insns);
673
      free (ready_ticks);
674
      if (FENCE_EXECUTING_INSNS (f))
675
        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
676
                          VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
677
      if (FENCE_READY_TICKS (f))
678
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
679
    }
680
  else
681
    {
682
      edge edge_old = NULL, edge_new = NULL;
683
      edge candidate;
684
      succ_iterator si;
685
      insn_t succ;
686
 
687
      /* Find fallthrough edge.  */
688
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
689
      candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
690
 
691
      if (!candidate
692
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
693
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
694
        {
695
          /* No fallthrough edge leading to basic block of INSN.  */
696
          state_reset (FENCE_STATE (f));
697
          state_free (state);
698
 
699
          reset_target_context (FENCE_TC (f), true);
700
          delete_target_context (tc);
701
 
702
          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
703
          FENCE_ISSUE_MORE (f) = issue_rate;
704
        }
705
      else
706
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
707
          {
708
            /* It would be weird if the same insn were the successor of
               several fallthrough edges.  */
710
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
711
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));
712
 
713
            state_free (FENCE_STATE (f));
714
            FENCE_STATE (f) = state;
715
 
716
            delete_target_context (FENCE_TC (f));
717
            FENCE_TC (f) = tc;
718
 
719
            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
720
            FENCE_ISSUE_MORE (f) = issue_more;
721
          }
722
        else
723
          {
724
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
725
            state_free (state);
726
            delete_target_context (tc);
727
 
728
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
729
                        != BLOCK_FOR_INSN (last_scheduled_insn));
730
          }
731
 
732
        /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
733
        FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
734
                         SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
735
          {
736
            if (succ == insn)
737
              {
738
                /* No same successor allowed from several edges.  */
739
                gcc_assert (!edge_old);
740
                edge_old = si.e1;
741
              }
742
          }
743
        /* Find edge of second predecessor (last_scheduled_insn->insn).  */
744
        FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
745
                         SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
746
          {
747
            if (succ == insn)
748
              {
749
                /* No same successor allowed from several edges.  */
750
                gcc_assert (!edge_new);
751
                edge_new = si.e1;
752
              }
753
          }
754
 
755
        /* Check if we can choose most probable predecessor.  */
756
        if (edge_old == NULL || edge_new == NULL)
757
          {
758
            reset_deps_context (FENCE_DC (f));
759
            delete_deps_context (dc);
760
            VEC_free (rtx, gc, executing_insns);
761
            free (ready_ticks);
762
 
763
            FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
764
            if (FENCE_EXECUTING_INSNS (f))
765
              VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
766
                                VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
767
            if (FENCE_READY_TICKS (f))
768
              memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
769
          }
770
        else
771
          if (edge_new->probability > edge_old->probability)
772
            {
773
              delete_deps_context (FENCE_DC (f));
774
              FENCE_DC (f) = dc;
775
              VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
776
              FENCE_EXECUTING_INSNS (f) = executing_insns;
777
              free (FENCE_READY_TICKS (f));
778
              FENCE_READY_TICKS (f) = ready_ticks;
779
              FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
780
              FENCE_CYCLE (f) = cycle;
781
            }
782
          else
783
            {
784
              /* Leave DC and CYCLE untouched.  */
785
              delete_deps_context (dc);
786
              VEC_free (rtx, gc, executing_insns);
787
              free (ready_ticks);
788
            }
789
    }
790
 
791
  /* Fill remaining invariant fields.  */
792
  if (after_stall_p)
793
    FENCE_AFTER_STALL_P (f) = 1;
794
 
795
  FENCE_ISSUED_INSNS (f) = 0;
796
  FENCE_STARTS_CYCLE_P (f) = 1;
797
  FENCE_SCHED_NEXT (f) = NULL;
798
}
799
 
800
/* Add a new fence to NEW_FENCES list, initializing it from all
801
   other parameters.  */
802
static void
803
add_to_fences (flist_tail_t new_fences, insn_t insn,
804
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
805
               VEC(rtx, gc) *executing_insns, int *ready_ticks,
806
               int ready_ticks_size, rtx sched_next, int cycle,
807
               int cycle_issued_insns, int issue_rate,
808
               bool starts_cycle_p, bool after_stall_p)
809
{
810
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
811
 
812
  if (! f)
813
    {
814
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
815
                 last_scheduled_insn, executing_insns, ready_ticks,
816
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
817
                 issue_rate, starts_cycle_p, after_stall_p);
818
 
819
      FLIST_TAIL_TAILP (new_fences)
820
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
821
    }
822
  else
823
    {
824
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
825
                    executing_insns, ready_ticks, ready_ticks_size,
826
                    sched_next, cycle, issue_rate, after_stall_p);
827
    }
828
}
829
 
830
/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
831
void
832
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
833
{
834
  fence_t f, old;
835
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
836
 
837
  old = FLIST_FENCE (old_fences);
838
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
839
                    FENCE_INSN (FLIST_FENCE (old_fences)));
840
  if (f)
841
    {
842
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
843
                    old->last_scheduled_insn, old->executing_insns,
844
                    old->ready_ticks, old->ready_ticks_size,
845
                    old->sched_next, old->cycle, old->issue_more,
846
                    old->after_stall_p);
847
    }
848
  else
849
    {
850
      _list_add (tailp);
851
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
852
      *FLIST_FENCE (*tailp) = *old;
853
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
854
    }
855
  FENCE_INSN (old) = NULL;
856
}
857
 
858
/* Add a new fence to NEW_FENCES list and initialize most of its data
859
   as a clean one.  */
860
void
861
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
862
{
863
  int ready_ticks_size = get_max_uid () + 1;
864
 
865
  add_to_fences (new_fences,
866
                 succ, state_create (), create_deps_context (),
867
                 create_target_context (true),
868
                 NULL_RTX, NULL,
869
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
870
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
871
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
872
}
873
 
874
/* Add a new fence to NEW_FENCES list and initialize all of its data
875
   from FENCE and SUCC.  */
876
void
877
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
878
{
879
  int * new_ready_ticks
880
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
881
 
882
  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
883
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
884
  add_to_fences (new_fences,
885
                 succ, state_create_copy (FENCE_STATE (fence)),
886
                 create_copy_of_deps_context (FENCE_DC (fence)),
887
                 create_copy_of_target_context (FENCE_TC (fence)),
888
                 FENCE_LAST_SCHEDULED_INSN (fence),
889
                 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
890
                 new_ready_ticks,
891
                 FENCE_READY_TICKS_SIZE (fence),
892
                 FENCE_SCHED_NEXT (fence),
893
                 FENCE_CYCLE (fence),
894
                 FENCE_ISSUED_INSNS (fence),
895
                 FENCE_ISSUE_MORE (fence),
896
                 FENCE_STARTS_CYCLE_P (fence),
897
                 FENCE_AFTER_STALL_P (fence));
898
}
899
 
900
 
901
/* Functions to work with regset and nop pools.  */
902
 
903
/* Return a new regset from the pool.  It might have some of the bits set
   from previous usage.  */
905
regset
906
get_regset_from_pool (void)
907
{
908
  regset rs;
909
 
910
  if (regset_pool.n != 0)
911
    rs = regset_pool.v[--regset_pool.n];
912
  else
913
    /* We need to create the regset.  */
914
    {
915
      rs = ALLOC_REG_SET (&reg_obstack);
916
 
917
      if (regset_pool.nn == regset_pool.ss)
918
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
919
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
920
      regset_pool.vv[regset_pool.nn++] = rs;
921
    }
922
 
923
  regset_pool.diff++;
924
 
925
  return rs;
926
}
927
 
928
/* Same as above, but returns the empty regset.  */
929
regset
930
get_clear_regset_from_pool (void)
931
{
932
  regset rs = get_regset_from_pool ();
933
 
934
  CLEAR_REG_SET (rs);
935
  return rs;
936
}
937
 
938
/* Return regset RS to the pool for future use.  */
939
void
940
return_regset_to_pool (regset rs)
941
{
942
  regset_pool.diff--;
943
 
944
  if (regset_pool.n == regset_pool.s)
945
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
946
                                (regset_pool.s = 2 * regset_pool.s + 1));
947
  regset_pool.v[regset_pool.n++] = rs;
948
}
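
/* A minimal illustrative sketch of the regset pool discipline (the variable
   name USED is hypothetical):

     regset used = get_clear_regset_from_pool ();
     ... collect registers into USED ...
     return_regset_to_pool (used);

   Every regset taken from the pool must eventually be returned to it;
   otherwise regset_pool.diff stays nonzero and the assert in
   free_regset_pool will trigger.  */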
949
 
950
#ifdef ENABLE_CHECKING
951
/* This is used as a qsort callback for sorting regset pool stacks.
952
   X and XX are addresses of two regsets.  They are never equal.  */
953
static int
954
cmp_v_in_regset_pool (const void *x, const void *xx)
955
{
956
  return *((const regset *) x) - *((const regset *) xx);
957
}
958
#endif
959
 
960
/* Free the regset pool, possibly checking for memory leaks.  */
961
void
962
free_regset_pool (void)
963
{
964
#ifdef ENABLE_CHECKING
965
  {
966
    regset *v = regset_pool.v;
967
    int i = 0;
968
    int n = regset_pool.n;
969
 
970
    regset *vv = regset_pool.vv;
971
    int ii = 0;
972
    int nn = regset_pool.nn;
973
 
974
    int diff = 0;
975
 
976
    gcc_assert (n <= nn);
977
 
978
    /* Sort both vectors so it will be possible to compare them.  */
979
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
980
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
981
 
982
    while (ii < nn)
983
      {
984
        if (v[i] == vv[ii])
985
          i++;
986
        else
987
          /* VV[II] was lost.  */
988
          diff++;
989
 
990
        ii++;
991
      }
992
 
993
    gcc_assert (diff == regset_pool.diff);
994
  }
995
#endif
996
 
997
  /* If this is not true, we have a memory leak.  */
998
  gcc_assert (regset_pool.diff == 0);
999
 
1000
  while (regset_pool.n)
1001
    {
1002
      --regset_pool.n;
1003
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
1004
    }
1005
 
1006
  free (regset_pool.v);
1007
  regset_pool.v = NULL;
1008
  regset_pool.s = 0;
1009
 
1010
  free (regset_pool.vv);
1011
  regset_pool.vv = NULL;
1012
  regset_pool.nn = 0;
1013
  regset_pool.ss = 0;
1014
 
1015
  regset_pool.diff = 0;
1016
}
1017
 
1018
 
1019
/* Functions to work with nop pools.  NOP insns are used as temporary
1020
   placeholders of the insns being scheduled to allow correct update of
1021
   the data sets.  When update is finished, NOPs are deleted.  */
1022
 
1023
/* A vinsn that is used to represent a nop.  This vinsn is shared among all
1024
   nops sel-sched generates.  */
1025
static vinsn_t nop_vinsn = NULL;
1026
 
1027
/* Emit a nop before INSN, taking it from pool.  */
1028
insn_t
1029
get_nop_from_pool (insn_t insn)
1030
{
1031
  insn_t nop;
1032
  bool old_p = nop_pool.n != 0;
1033
  int flags;
1034
 
1035
  if (old_p)
1036
    nop = nop_pool.v[--nop_pool.n];
1037
  else
1038
    nop = nop_pattern;
1039
 
1040
  nop = emit_insn_before (nop, insn);
1041
 
1042
  if (old_p)
1043
    flags = INSN_INIT_TODO_SSID;
1044
  else
1045
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1046
 
1047
  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1048
  sel_init_new_insn (nop, flags);
1049
 
1050
  return nop;
1051
}
1052
 
1053
/* Remove NOP from the instruction stream and return it to the pool.  */
1054
void
1055
return_nop_to_pool (insn_t nop, bool full_tidying)
1056
{
1057
  gcc_assert (INSN_IN_STREAM_P (nop));
1058
  sel_remove_insn (nop, false, full_tidying);
1059
 
1060
  if (nop_pool.n == nop_pool.s)
1061
    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1062
                             (nop_pool.s = 2 * nop_pool.s + 1));
1063
  nop_pool.v[nop_pool.n++] = nop;
1064
}
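
/* Illustrative pairing of the nop pool routines: a placeholder nop obtained
   with get_nop_from_pool (insn) stands in for the insn being moved while the
   data sets are updated, and is afterwards removed from the stream and
   recycled with return_nop_to_pool (nop, full_tidying).  */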
1065
 
1066
/* Free the nop pool.  */
1067
void
1068
free_nop_pool (void)
1069
{
1070
  nop_pool.n = 0;
1071
  nop_pool.s = 0;
1072
  free (nop_pool.v);
1073
  nop_pool.v = NULL;
1074
}
1075
 
1076
 
1077
/* Skip unspecs to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case something needs to be skipped.  */
1080
static int
1081
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1082
{
1083
  const_rtx x = *xx;
1084
  const_rtx y = *yy;
1085
 
1086
  if (GET_CODE (x) == UNSPEC
1087
      && (targetm.sched.skip_rtx_p == NULL
1088
          || targetm.sched.skip_rtx_p (x)))
1089
    {
1090
      *nx = XVECEXP (x, 0, 0);
1091
      *ny = CONST_CAST_RTX (y);
1092
      return 1;
1093
    }
1094
 
1095
  if (GET_CODE (y) == UNSPEC
1096
      && (targetm.sched.skip_rtx_p == NULL
1097
          || targetm.sched.skip_rtx_p (y)))
1098
    {
1099
      *nx = CONST_CAST_RTX (x);
1100
      *ny = XVECEXP (y, 0, 0);
1101
      return 1;
1102
    }
1103
 
1104
  return 0;
1105
}
1106
 
1107
/* Callback, called from hash_rtx_cb.  Helps to hash the UNSPEC rtx X in a
   correct way to support ia64 speculation.  When changes are needed, the new
   rtx NX and the new mode NMODE are written, and the callback returns true.  */
1110
static int
1111
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1112
                           rtx *nx, enum machine_mode* nmode)
1113
{
1114
  if (GET_CODE (x) == UNSPEC
1115
      && targetm.sched.skip_rtx_p
1116
      && targetm.sched.skip_rtx_p (x))
1117
    {
1118
      *nx = XVECEXP (x, 0, 0);
1119
      *nmode = VOIDmode;
1120
      return 1;
1121
    }
1122
 
1123
  return 0;
1124
}
1125
 
1126
/* Return true if LHS and RHS are OK to be scheduled separately.  */
1127
static bool
1128
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1129
{
1130
  if (lhs == NULL || rhs == NULL)
1131
    return false;
1132
 
1133
  /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as rhs: there is no
     point in using a reg if a const can be used.  Moreover, scheduling a
     const as rhs may lead to a mode mismatch, because consts don't have
     modes but could be merged from branches where the same const is used in
     different modes.  */
1137
  if (CONSTANT_P (rhs))
1138
    return false;
1139
 
1140
  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
1141
  if (COMPARISON_P (rhs))
1142
      return false;
1143
 
1144
  /* Do not allow single REG to be an rhs.  */
1145
  if (REG_P (rhs))
1146
    return false;
1147
 
1148
  /* See comment at find_used_regs_1 (*1) for explanation of this
1149
     restriction.  */
1150
  /* FIXME: remove this later.  */
1151
  if (MEM_P (lhs))
1152
    return false;
1153
 
1154
  /* This will filter all tricky things like ZERO_EXTRACT etc.
1155
     For now we don't handle it.  */
1156
  if (!REG_P (lhs) && !MEM_P (lhs))
1157
    return false;
1158
 
1159
  return true;
1160
}
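
/* For example, (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4))) is
   separable by the rules above: its lhs is a register and its rhs is neither
   a constant, a comparison, nor a lone register.  By contrast, a simple copy
   (set (reg:SI 100) (reg:SI 101)) is not separable, because its rhs is a
   single REG.  */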
1161
 
1162
/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
1163
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
1164
   used e.g. for insns from recovery blocks.  */
1165
static void
1166
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1167
{
1168
  hash_rtx_callback_function hrcf;
1169
  int insn_class;
1170
 
1171
  VINSN_INSN_RTX (vi) = insn;
1172
  VINSN_COUNT (vi) = 0;
1173
  vi->cost = -1;
1174
 
1175
  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1176
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1177
  else
1178
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1179
 
1180
  /* Hash vinsn depending on whether it is separable or not.  */
1181
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1182
  if (VINSN_SEPARABLE_P (vi))
1183
    {
1184
      rtx rhs = VINSN_RHS (vi);
1185
 
1186
      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1187
                                     NULL, NULL, false, hrcf);
1188
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1189
                                         VOIDmode, NULL, NULL,
1190
                                         false, hrcf);
1191
    }
1192
  else
1193
    {
1194
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1195
                                     NULL, NULL, false, hrcf);
1196
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1197
    }
1198
 
1199
  insn_class = haifa_classify_insn (insn);
1200
  if (insn_class >= 2
1201
      && (!targetm.sched.get_insn_spec_ds
1202
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1203
              == 0)))
1204
    VINSN_MAY_TRAP_P (vi) = true;
1205
  else
1206
    VINSN_MAY_TRAP_P (vi) = false;
1207
}
1208
 
1209
/* Indicate that VI has become part of an rtx object.  */
1210
void
1211
vinsn_attach (vinsn_t vi)
1212
{
1213
  /* Assert that VI is not pending for deletion.  */
1214
  gcc_assert (VINSN_INSN_RTX (vi));
1215
 
1216
  VINSN_COUNT (vi)++;
1217
}
1218
 
1219
/* Create and init VI from the INSN.  Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI).  */
1221
static vinsn_t
1222
vinsn_create (insn_t insn, bool force_unique_p)
1223
{
1224
  vinsn_t vi = XCNEW (struct vinsn_def);
1225
 
1226
  vinsn_init (vi, insn, force_unique_p);
1227
  return vi;
1228
}
1229
 
1230
/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
1231
   the copy.  */
1232
vinsn_t
1233
vinsn_copy (vinsn_t vi, bool reattach_p)
1234
{
1235
  rtx copy;
1236
  bool unique = VINSN_UNIQUE_P (vi);
1237
  vinsn_t new_vi;
1238
 
1239
  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1240
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
1241
  if (reattach_p)
1242
    {
1243
      vinsn_detach (vi);
1244
      vinsn_attach (new_vi);
1245
    }
1246
 
1247
  return new_vi;
1248
}
1249
 
1250
/* Delete the VI vinsn and free its data.  */
1251
static void
1252
vinsn_delete (vinsn_t vi)
1253
{
1254
  gcc_assert (VINSN_COUNT (vi) == 0);
1255
 
1256
  return_regset_to_pool (VINSN_REG_SETS (vi));
1257
  return_regset_to_pool (VINSN_REG_USES (vi));
1258
  return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1259
 
1260
  free (vi);
1261
}
1262
 
1263
/* Indicate that VI is no longer a part of some rtx object.
1264
   Remove VI if it is no longer needed.  */
1265
void
1266
vinsn_detach (vinsn_t vi)
1267
{
1268
  gcc_assert (VINSN_COUNT (vi) > 0);
1269
 
1270
  if (--VINSN_COUNT (vi) == 0)
1271
    vinsn_delete (vi);
1272
}
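
/* Note on the reference counting discipline: every object that stores a
   pointer to a vinsn (an expr, an insn, a history entry) pairs a
   vinsn_attach call with a matching vinsn_detach call; the vinsn is
   destroyed automatically when the last reference is detached.  */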
1273
 
1274
/* Returns TRUE if VI is a branch.  */
1275
bool
1276
vinsn_cond_branch_p (vinsn_t vi)
1277
{
1278
  insn_t insn;
1279
 
1280
  if (!VINSN_UNIQUE_P (vi))
1281
    return false;
1282
 
1283
  insn = VINSN_INSN_RTX (vi);
1284
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1285
    return false;
1286
 
1287
  return control_flow_insn_p (insn);
1288
}
1289
 
1290
/* Return latency of INSN.  */
1291
static int
1292
sel_insn_rtx_cost (rtx insn)
1293
{
1294
  int cost;
1295
 
1296
  /* A USE insn, or something else we don't need to
1297
     understand.  We can't pass these directly to
1298
     result_ready_cost or insn_default_latency because it will
1299
     trigger a fatal error for unrecognizable insns.  */
1300
  if (recog_memoized (insn) < 0)
1301
    cost = 0;
1302
  else
1303
    {
1304
      cost = insn_default_latency (insn);
1305
 
1306
      if (cost < 0)
1307
        cost = 0;
1308
    }
1309
 
1310
  return cost;
1311
}
1312
 
1313
/* Return the cost of the VI.
1314
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
1315
int
1316
sel_vinsn_cost (vinsn_t vi)
1317
{
1318
  int cost = vi->cost;
1319
 
1320
  if (cost < 0)
1321
    {
1322
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1323
      vi->cost = cost;
1324
    }
1325
 
1326
  return cost;
1327
}
1328
 
1329
 
1330
/* Functions for insn emitting.  */
1331
 
1332
/* Emit new insn after AFTER based on PATTERN and initialize its data from
1333
   EXPR and SEQNO.  */
1334
insn_t
1335
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1336
{
1337
  insn_t new_insn;
1338
 
1339
  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1340
 
1341
  new_insn = emit_insn_after (pattern, after);
1342
  set_insn_init (expr, NULL, seqno);
1343
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1344
 
1345
  return new_insn;
1346
}
1347
 
1348
/* Force newly generated vinsns to be unique.  */
1349
static bool init_insn_force_unique_p = false;
1350
 
1351
/* Emit new speculation recovery insn after AFTER based on PATTERN and
1352
   initialize its data from EXPR and SEQNO.  */
1353
insn_t
1354
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1355
                                      insn_t after)
1356
{
1357
  insn_t insn;
1358
 
1359
  gcc_assert (!init_insn_force_unique_p);
1360
 
1361
  init_insn_force_unique_p = true;
1362
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1363
  CANT_MOVE (insn) = 1;
1364
  init_insn_force_unique_p = false;
1365
 
1366
  return insn;
1367
}
1368
 
1369
/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
1370
   take it as a new vinsn instead of EXPR's vinsn.
1371
   We simplify insns later, after scheduling region in
1372
   simplify_changed_insns.  */
1373
insn_t
1374
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1375
                              insn_t after)
1376
{
1377
  expr_t emit_expr;
1378
  insn_t insn;
1379
  int flags;
1380
 
1381
  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1382
                             seqno);
1383
  insn = EXPR_INSN_RTX (emit_expr);
1384
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1385
 
1386
  flags = INSN_INIT_TODO_SSID;
1387
  if (INSN_LUID (insn) == 0)
1388
    flags |= INSN_INIT_TODO_LUID;
1389
  sel_init_new_insn (insn, flags);
1390
 
1391
  return insn;
1392
}
1393
 
1394
/* Move insn from EXPR after AFTER.  */
1395
insn_t
1396
sel_move_insn (expr_t expr, int seqno, insn_t after)
1397
{
1398
  insn_t insn = EXPR_INSN_RTX (expr);
1399
  basic_block bb = BLOCK_FOR_INSN (after);
1400
  insn_t next = NEXT_INSN (after);
1401
 
1402
  /* Assert that in move_op we disconnected this insn properly.  */
1403
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1404
  PREV_INSN (insn) = after;
1405
  NEXT_INSN (insn) = next;
1406
 
1407
  NEXT_INSN (after) = insn;
1408
  PREV_INSN (next) = insn;
1409
 
1410
  /* Update links from insn to bb and vice versa.  */
1411
  df_insn_change_bb (insn, bb);
1412
  if (BB_END (bb) == after)
1413
    BB_END (bb) = insn;
1414
 
1415
  prepare_insn_expr (insn, seqno);
1416
  return insn;
1417
}
1418
 
1419
 
1420
/* Functions to work with right-hand sides.  */
1421
 
1422
/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1423
   VECT and return true when found.  Use NEW_VINSN for comparison only when
1424
   COMPARE_VINSNS is true.  Write to INDP the index on which
1425
   the search has stopped, such that inserting the new element at INDP will
1426
   retain VECT's sort order.  */
1427
static bool
1428
find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
1429
                        unsigned uid, vinsn_t new_vinsn,
1430
                        bool compare_vinsns, int *indp)
1431
{
1432
  expr_history_def *arr;
1433
  int i, j, len = VEC_length (expr_history_def, vect);
1434
 
1435
  if (len == 0)
1436
    {
1437
      *indp = 0;
1438
      return false;
1439
    }
1440
 
1441
  arr = VEC_address (expr_history_def, vect);
1442
  i = 0, j = len - 1;
1443
 
1444
  while (i <= j)
1445
    {
1446
      unsigned auid = arr[i].uid;
1447
      vinsn_t avinsn = arr[i].new_expr_vinsn;
1448
 
1449
      if (auid == uid
1450
          /* When undoing transformation on a bookkeeping copy, the new vinsn
1451
             may not be exactly equal to the one that is saved in the vector.
1452
             This is because the insn whose copy we're checking was possibly
1453
             substituted itself.  */
1454
          && (! compare_vinsns
1455
              || vinsn_equal_p (avinsn, new_vinsn)))
1456
        {
1457
          *indp = i;
1458
          return true;
1459
        }
1460
      else if (auid > uid)
1461
        break;
1462
      i++;
1463
    }
1464
 
1465
  *indp = i;
1466
  return false;
1467
}
1468
 
1469
/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
1470
   the position found or -1, if no such value is in vector.
1471
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
1472
int
1473
find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
1474
                      vinsn_t new_vinsn, bool originators_p)
1475
{
1476
  int ind;
1477
 
1478
  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1479
                              false, &ind))
1480
    return ind;
1481
 
1482
  if (INSN_ORIGINATORS (insn) && originators_p)
1483
    {
1484
      unsigned uid;
1485
      bitmap_iterator bi;
1486
 
1487
      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1488
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1489
          return ind;
1490
    }
1491
 
1492
  return -1;
1493
}
1494
 
1495
/* Insert a new element into the sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched for using the
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
1499
void
1500
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
1501
                        unsigned uid, enum local_trans_type type,
1502
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1503
                        ds_t spec_ds)
1504
{
1505
  VEC(expr_history_def, heap) *vect = *pvect;
1506
  expr_history_def temp;
1507
  bool res;
1508
  int ind;
1509
 
1510
  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1511
 
1512
  if (res)
1513
    {
1514
      expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
1515
 
1516
      /* It is possible that speculation types of expressions that were
1517
         propagated through different paths will be different here.  In this
1518
         case, merge the status to get the correct check later.  */
1519
      if (phist->spec_ds != spec_ds)
1520
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1521
      return;
1522
    }
1523
 
1524
  temp.uid = uid;
1525
  temp.old_expr_vinsn = old_expr_vinsn;
1526
  temp.new_expr_vinsn = new_expr_vinsn;
1527
  temp.spec_ds = spec_ds;
1528
  temp.type = type;
1529
 
1530
  vinsn_attach (old_expr_vinsn);
1531
  vinsn_attach (new_expr_vinsn);
1532
  VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
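  /* VEC_safe_insert may grow (reallocate) the vector, so the possibly
     updated pointer is stored back through PVECT below.  */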
1533
  *pvect = vect;
1534
}
1535
 
1536
/* Free history vector PVECT.  */
1537
static void
1538
free_history_vect (VEC (expr_history_def, heap) **pvect)
1539
{
1540
  unsigned i;
1541
  expr_history_def *phist;
1542
 
1543
  if (! *pvect)
1544
    return;
1545
 
1546
  for (i = 0;
1547
       VEC_iterate (expr_history_def, *pvect, i, phist);
1548
       i++)
1549
    {
1550
      vinsn_detach (phist->old_expr_vinsn);
1551
      vinsn_detach (phist->new_expr_vinsn);
1552
    }
1553
 
1554
  VEC_free (expr_history_def, heap, *pvect);
1555
  *pvect = NULL;
1556
}
1557
 
1558
 
1559
/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
1560
bool
1561
vinsn_equal_p (vinsn_t x, vinsn_t y)
1562
{
1563
  rtx_equal_p_callback_function repcf;
1564
 
1565
  if (x == y)
1566
    return true;
1567
 
1568
  if (VINSN_TYPE (x) != VINSN_TYPE (y))
1569
    return false;
1570
 
1571
  if (VINSN_HASH (x) != VINSN_HASH (y))
1572
    return false;
1573
 
1574
  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1575
  if (VINSN_SEPARABLE_P (x))
1576
    {
1577
      /* Compare RHSes of VINSNs.  */
1578
      gcc_assert (VINSN_RHS (x));
1579
      gcc_assert (VINSN_RHS (y));
1580
 
1581
      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1582
    }
1583
 
1584
  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1585
}
1586
 
1587
 
1588
/* Functions for working with expressions.  */
1589
 
1590
/* Initialize EXPR.  */
1591
static void
1592
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1593
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
1594
           ds_t spec_to_check_ds, int orig_sched_cycle,
1595
           VEC(expr_history_def, heap) *history, bool target_available,
1596
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1597
           bool cant_move)
1598
{
1599
  vinsn_attach (vi);
1600
 
1601
  EXPR_VINSN (expr) = vi;
1602
  EXPR_SPEC (expr) = spec;
1603
  EXPR_USEFULNESS (expr) = use;
1604
  EXPR_PRIORITY (expr) = priority;
1605
  EXPR_PRIORITY_ADJ (expr) = 0;
1606
  EXPR_SCHED_TIMES (expr) = sched_times;
1607
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1608
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1609
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1610
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1611
 
1612
  if (history)
1613
    EXPR_HISTORY_OF_CHANGES (expr) = history;
1614
  else
1615
    EXPR_HISTORY_OF_CHANGES (expr) = NULL;
1616
 
1617
  EXPR_TARGET_AVAILABLE (expr) = target_available;
1618
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1619
  EXPR_WAS_RENAMED (expr) = was_renamed;
1620
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1621
  EXPR_CANT_MOVE (expr) = cant_move;
1622
}
1623
 
1624
/* Make a copy of the expr FROM into the expr TO.  */
1625
void
1626
copy_expr (expr_t to, expr_t from)
1627
{
1628
  VEC(expr_history_def, heap) *temp = NULL;
1629
 
1630
  if (EXPR_HISTORY_OF_CHANGES (from))
1631
    {
1632
      unsigned i;
1633
      expr_history_def *phist;
1634
 
1635
      temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
1636
      for (i = 0;
1637
           VEC_iterate (expr_history_def, temp, i, phist);
1638
           i++)
1639
        {
1640
          vinsn_attach (phist->old_expr_vinsn);
1641
          vinsn_attach (phist->new_expr_vinsn);
1642
        }
1643
    }
1644
 
1645
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1646
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1647
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1648
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1649
             EXPR_ORIG_SCHED_CYCLE (from), temp,
1650
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1651
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1652
             EXPR_CANT_MOVE (from));
1653
}
1654
 
1655
/* Same, but the final expr will not ever be in av sets, so don't copy
1656
   "uninteresting" data such as bitmap cache.  */
1657
void
1658
copy_expr_onside (expr_t to, expr_t from)
1659
{
1660
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1661
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1662
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
1663
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1664
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1665
             EXPR_CANT_MOVE (from));
1666
}
1667
 
1668
/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
1669
   initializing new insns.  */
1670
static void
1671
prepare_insn_expr (insn_t insn, int seqno)
1672
{
1673
  expr_t expr = INSN_EXPR (insn);
1674
  ds_t ds;
1675
 
1676
  INSN_SEQNO (insn) = seqno;
1677
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1678
  EXPR_SPEC (expr) = 0;
1679
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1680
  EXPR_WAS_SUBSTITUTED (expr) = 0;
1681
  EXPR_WAS_RENAMED (expr) = 0;
1682
  EXPR_TARGET_AVAILABLE (expr) = 1;
1683
  INSN_LIVE_VALID_P (insn) = false;
1684
 
1685
  /* ??? If this expression is speculative, make its dependence
1686
     as weak as possible.  We can filter this expression later
1687
     in process_spec_exprs, because we do not distinguish
1688
     between the status we got during compute_av_set and the
1689
     existing status.  To be fixed.  */
1690
  ds = EXPR_SPEC_DONE_DS (expr);
1691
  if (ds)
1692
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1693
 
1694
  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1695
}
1696
 
1697
/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
1698
   is non-null when expressions are merged from different successors at
1699
   a split point.  */
1700
static void
1701
update_target_availability (expr_t to, expr_t from, insn_t split_point)
1702
{
1703
  if (EXPR_TARGET_AVAILABLE (to) < 0
1704
      || EXPR_TARGET_AVAILABLE (from) < 0)
1705
    EXPR_TARGET_AVAILABLE (to) = -1;
1706
  else
1707
    {
1708
      /* We try to detect the case when one of the expressions
1709
         can only be reached through another one.  In this case,
1710
         we can do better.  */
1711
      if (split_point == NULL)
1712
        {
1713
          int toind, fromind;
1714
 
1715
          toind = EXPR_ORIG_BB_INDEX (to);
1716
          fromind = EXPR_ORIG_BB_INDEX (from);
1717
 
1718
          if (toind && toind == fromind)
1719
            /* Do nothing -- everything is done in
1720
               merge_with_other_exprs.  */
1721
            ;
1722
          else
1723
            EXPR_TARGET_AVAILABLE (to) = -1;
1724
        }
1725
      else
1726
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1727
    }
1728
}
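
/* As the code above shows, EXPR_TARGET_AVAILABLE is effectively
   three-valued: a negative value means the availability of the target
   register is unknown, and merging propagates that state; at a split point
   the known availability bits of both expressions are simply ANDed.  */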
1729
 
1730
/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
1731
   is non-null when expressions are merged from different successors at
1732
   a split point.  */
1733
static void
1734
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1735
{
1736
  ds_t old_to_ds, old_from_ds;
1737
 
1738
  old_to_ds = EXPR_SPEC_DONE_DS (to);
1739
  old_from_ds = EXPR_SPEC_DONE_DS (from);
1740
 
1741
  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1742
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1743
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1744
 
1745
  /* When merging e.g. control & data speculative exprs, or a control
1746
     speculative with a control&data speculative one, we really have
1747
     to change vinsn too.  Also, when speculative status is changed,
1748
     we also need to record this as a transformation in expr's history.  */
1749
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1750
    {
1751
      old_to_ds = ds_get_speculation_types (old_to_ds);
1752
      old_from_ds = ds_get_speculation_types (old_from_ds);
1753
 
1754
      if (old_to_ds != old_from_ds)
1755
        {
1756
          ds_t record_ds;
1757
 
1758
          /* When both expressions are speculative, we need to change
1759
             the vinsn first.  */
1760
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1761
            {
1762
              int res;
1763
 
1764
              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1765
              gcc_assert (res >= 0);
1766
            }
1767
 
1768
          if (split_point != NULL)
1769
            {
1770
              /* Record the change with proper status.  */
1771
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1772
              record_ds &= ~(old_to_ds & SPECULATIVE);
1773
              record_ds &= ~(old_from_ds & SPECULATIVE);
1774
 
1775
              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1776
                                      INSN_UID (split_point), TRANS_SPECULATION,
1777
                                      EXPR_VINSN (from), EXPR_VINSN (to),
1778
                                      record_ds);
1779
            }
1780
        }
1781
    }
1782
}
1783
 
1784
 
1785
/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
1786
   this is done along different paths.  */
1787
void
1788
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1789
{
1790
  int i;
1791
  expr_history_def *phist;
1792
 
1793
  /* For now, we just set the spec of the resulting expr to be the minimum
1794
     of the specs of the merged exprs.  */
1795
  if (EXPR_SPEC (to) > EXPR_SPEC (from))
1796
    EXPR_SPEC (to) = EXPR_SPEC (from);
1797
 
1798
  if (split_point)
1799
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1800
  else
1801
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1802
                                EXPR_USEFULNESS (from));
1803
 
1804
  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1805
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1806
 
1807
  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1808
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1809
 
1810
  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1811
    EXPR_ORIG_BB_INDEX (to) = 0;
1812
 
1813
  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1814
                                    EXPR_ORIG_SCHED_CYCLE (from));
1815
 
1816
  /* We keep this vector sorted.  */
1817
  for (i = 0;
1818
       VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
1819
                    i, phist);
1820
       i++)
1821
    insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1822
                            phist->uid, phist->type,
1823
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
1824
                            phist->spec_ds);
1825
 
1826
  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1827
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1828
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1829
 
1830
  update_target_availability (to, from, split_point);
1831
  update_speculative_bits (to, from, split_point);
1832
}
1833
 
1834
/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
1835
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
1836
   are merged from different successors at a split point.  */
1837
void
1838
merge_expr (expr_t to, expr_t from, insn_t split_point)
1839
{
1840
  vinsn_t to_vi = EXPR_VINSN (to);
1841
  vinsn_t from_vi = EXPR_VINSN (from);
1842
 
1843
  gcc_assert (vinsn_equal_p (to_vi, from_vi));
1844
 
1845
  /* Make sure that speculative pattern is propagated into exprs that
1846
     have non-speculative one.  This will provide us with consistent
1847
     speculative bits and speculative patterns inside expr.  */
1848
  if (EXPR_SPEC_DONE_DS (to) == 0
1849
      && EXPR_SPEC_DONE_DS (from) != 0)
1850
    change_vinsn_in_expr (to, EXPR_VINSN (from));
1851
 
1852
  merge_expr_data (to, from, split_point);
1853
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1854
}
1855
 
1856
/* Clear the information of this EXPR.  */
1857
void
1858
clear_expr (expr_t expr)
1859
{
1860
 
1861
  vinsn_detach (EXPR_VINSN (expr));
1862
  EXPR_VINSN (expr) = NULL;
1863
 
1864
  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1865
}
1866
 
1867
/* For a given LV_SET, mark EXPR as having an unavailable target register.  */
1868
static void
1869
set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1870
{
1871
  if (EXPR_SEPARABLE_P (expr))
1872
    {
1873
      if (REG_P (EXPR_LHS (expr))
1874
          && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
1875
        {
1876
          /* If it's an insn like r1 = use (r1, ...), and it exists in
1877
             different forms in each of the av_sets being merged, we can't say
1878
             whether original destination register is available or not.
1879
             However, this still works if destination register is not used
1880
             in the original expression: if the branch at which LV_SET we're
1881
             looking here is not actually 'other branch' in sense that same
1882
             expression is available through it (but it can't be determined
1883
             at computation stage because of transformations on one of the
1884
             branches), it still won't affect the availability.
1885
             Liveness of a register somewhere on a code motion path means
1886
             it's either read somewhere on a code motion path, live on
1887
             'other' branch, live at the point immediately following
1888
             the original operation, or is read by the original operation.
1889
             The latter case is filtered out in the condition below.
1890
             It still doesn't cover the case when register is defined and used
1891
             somewhere within the code motion path, and in this case we could
1892
             miss a unifying code motion along both branches using a renamed
1893
             register, but it won't affect code correctness since upon
1894
             an actual code motion, bookkeeping code would be generated.  */
1895
          if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1896
                            REGNO (EXPR_LHS (expr))))
1897
            EXPR_TARGET_AVAILABLE (expr) = -1;
1898
          else
1899
            EXPR_TARGET_AVAILABLE (expr) = false;
1900
        }
1901
    }
1902
  else
1903
    {
1904
      unsigned regno;
1905
      reg_set_iterator rsi;
1906
 
1907
      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1908
                                 0, regno, rsi)
1909
        if (bitmap_bit_p (lv_set, regno))
1910
          {
1911
            EXPR_TARGET_AVAILABLE (expr) = false;
1912
            break;
1913
          }
1914
 
1915
      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1916
                                 0, regno, rsi)
1917
        if (bitmap_bit_p (lv_set, regno))
1918
          {
1919
            EXPR_TARGET_AVAILABLE (expr) = false;
1920
            break;
1921
          }
1922
    }
1923
}
1924
 
1925
/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
1926
   or dependence status has changed, 2 when additionally the target register
1927
   became unavailable, 0 if nothing had to be changed, and -1 on failure.  */
1928
int
1929
speculate_expr (expr_t expr, ds_t ds)
1930
{
1931
  int res;
1932
  rtx orig_insn_rtx;
1933
  rtx spec_pat;
1934
  ds_t target_ds, current_ds;
1935
 
1936
  /* Obtain the status we need to put on EXPR.   */
1937
  target_ds = (ds & SPECULATIVE);
1938
  current_ds = EXPR_SPEC_DONE_DS (expr);
1939
  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1940
 
1941
  orig_insn_rtx = EXPR_INSN_RTX (expr);
1942
 
1943
  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1944
 
1945
  switch (res)
1946
    {
1947
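    /* No pattern change is needed; just record the merged speculation
       status.  */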
    case 0:
1948
      EXPR_SPEC_DONE_DS (expr) = ds;
1949
      return current_ds != ds ? 1 : 0;
1950
 
1951
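    /* A new speculative pattern was generated in SPEC_PAT; switch the
       expression to a vinsn built from it.  */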
    case 1:
1952
      {
1953
        rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1954
        vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1955
 
1956
        change_vinsn_in_expr (expr, spec_vinsn);
1957
        EXPR_SPEC_DONE_DS (expr) = ds;
1958
        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1959
 
1960
        /* Do not allow clobbering the address register of speculative
1961
           insns.  */
1962
        if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1963
                          expr_dest_regno (expr)))
1964
          {
1965
            EXPR_TARGET_AVAILABLE (expr) = false;
1966
            return 2;
1967
          }
1968
 
1969
        return 1;
1970
      }
1971
 
1972
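    /* The insn cannot be made speculative in the requested way.  */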
    case -1:
1973
      return -1;
1974
 
1975
    default:
1976
      gcc_unreachable ();
1977
      return -1;
1978
    }
1979
}
1980
 
1981
/* Return a destination register, if any, of EXPR.  */
1982
rtx
1983
expr_dest_reg (expr_t expr)
1984
{
1985
  rtx dest = VINSN_LHS (EXPR_VINSN (expr));
1986
 
1987
  if (dest != NULL_RTX && REG_P (dest))
1988
    return dest;
1989
 
1990
  return NULL_RTX;
1991
}
1992
 
1993
/* Returns the REGNO of EXPR's destination.  */
1994
unsigned
1995
expr_dest_regno (expr_t expr)
1996
{
1997
  rtx dest = expr_dest_reg (expr);
1998
 
1999
  gcc_assert (dest != NULL_RTX);
2000
  return REGNO (dest);
2001
}
2002
 
2003
/* For a given LV_SET, mark all expressions in JOIN_SET that are not present in
2004
   AV_SET as having an unavailable target register.  */
2005
void
2006
mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2007
{
2008
  expr_t expr;
2009
  av_set_iterator avi;
2010
 
2011
  FOR_EACH_EXPR (expr, avi, join_set)
2012
    if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2013
      set_unavailable_target_for_expr (expr, lv_set);
2014
}
2015
 
2016
 
2017
/* Av set functions.  */
2018
 
2019
/* Add a new element to av set SETP.
2020
   Return the element added.  */
2021
static av_set_t
2022
av_set_add_element (av_set_t *setp)
2023
{
2024
  /* Insert at the beginning of the list.  */
2025
  _list_add (setp);
2026
  return *setp;
2027
}
2028
 
2029
/* Add EXPR to SETP.  */
2030
void
2031
av_set_add (av_set_t *setp, expr_t expr)
2032
{
2033
  av_set_t elem;
2034
 
2035
  gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2036
  elem = av_set_add_element (setp);
2037
  copy_expr (_AV_SET_EXPR (elem), expr);
2038
}
2039
 
2040
/* Same, but do not copy EXPR.  */
2041
static void
2042
av_set_add_nocopy (av_set_t *setp, expr_t expr)
2043
{
2044
  av_set_t elem;
2045
 
2046
  elem = av_set_add_element (setp);
2047
  *_AV_SET_EXPR (elem) = *expr;
2048
}
2049
 
2050
/* Remove expr pointed to by IP from the av_set.  */
2051
void
2052
av_set_iter_remove (av_set_iterator *ip)
2053
{
2054
  clear_expr (_AV_SET_EXPR (*ip->lp));
2055
  _list_iter_remove (ip);
2056
}
2057
 
2058
/* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the
2059
   sense of the vinsn_equal_p function.  Return NULL if no such expr
2060
   was found in SET.  */
2061
expr_t
2062
av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2063
{
2064
  expr_t expr;
2065
  av_set_iterator i;
2066
 
2067
  FOR_EACH_EXPR (expr, i, set)
2068
    if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2069
      return expr;
2070
  return NULL;
2071
}
2072
 
2073
/* Same, but also remove the EXPR found.   */
2074
static expr_t
2075
av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2076
{
2077
  expr_t expr;
2078
  av_set_iterator i;
2079
 
2080
  FOR_EACH_EXPR_1 (expr, i, setp)
2081
    if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2082
      {
2083
        _list_iter_remove_nofree (&i);
2084
        return expr;
2085
      }
2086
  return NULL;
2087
}
2088
 
2089
/* Search for an expr in SET, such that it's equivalent to EXPR in the
2090
   sense of vinsn_equal_p function of their vinsns, but not EXPR itself.
2091
   Returns NULL if no such expr was found in SET.  */
2092
static expr_t
2093
av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2094
{
2095
  expr_t cur_expr;
2096
  av_set_iterator i;
2097
 
2098
  FOR_EACH_EXPR (cur_expr, i, set)
2099
    {
2100
      if (cur_expr == expr)
2101
        continue;
2102
      if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2103
        return cur_expr;
2104
    }
2105
 
2106
  return NULL;
2107
}
2108
 
2109
/* If an equivalent expression is already in AVP, merge EXPR into it
   and remove EXPR.  */
2110
expr_t
2111
merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2112
{
2113
  expr_t expr2;
2114
 
2115
  expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2116
  if (expr2 != NULL)
2117
    {
2118
      /* Reset target availability on merge, since taking it only from one
2119
         of the exprs would be controversial for different code.  */
2120
      EXPR_TARGET_AVAILABLE (expr2) = -1;
2121
      EXPR_USEFULNESS (expr2) = 0;
2122
 
2123
      merge_expr (expr2, expr, NULL);
2124
 
2125
      /* Fix usefulness as it should be now REG_BR_PROB_BASE.  */
2126
      EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2127
 
2128
      av_set_iter_remove (ip);
2129
      return expr2;
2130
    }
2131
 
2132
  return expr;
2133
}
2134
 
2135
/* Return true if there is an expr that correlates to VI in SET.  */
2136
bool
2137
av_set_is_in_p (av_set_t set, vinsn_t vi)
2138
{
2139
  return av_set_lookup (set, vi) != NULL;
2140
}
2141
 
2142
/* Return a copy of SET.  */
2143
av_set_t
2144
av_set_copy (av_set_t set)
2145
{
2146
  expr_t expr;
2147
  av_set_iterator i;
2148
  av_set_t res = NULL;
2149
 
2150
  FOR_EACH_EXPR (expr, i, set)
2151
    av_set_add (&res, expr);
2152
 
2153
  return res;
2154
}
2155
 
2156
/* Join two av sets that do not have common elements by attaching the second
2157
   set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
2158
   to _AV_SET_NEXT of the first set's last element).  */
2159
static void
2160
join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2161
{
2162
  gcc_assert (*to_tailp == NULL);
2163
  *to_tailp = *fromp;
2164
  *fromp = NULL;
2165
}
2166
 
2167
/* Make the set pointed to by TOP the union of TOP and FROMP.  Clear the
2168
   av_set pointed to by FROMP afterwards.  */
2169
void
2170
av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2171
{
2172
  expr_t expr1;
2173
  av_set_iterator i;
2174
 
2175
  /* Delete from TOP all exprs that are present in FROMP.  */
2176
  FOR_EACH_EXPR_1 (expr1, i, top)
2177
    {
2178
      expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2179
 
2180
      if (expr2)
2181
        {
2182
          merge_expr (expr2, expr1, insn);
2183
          av_set_iter_remove (&i);
2184
        }
2185
    }
2186
 
2187
  join_distinct_sets (i.lp, fromp);
2188
}
2189
 
2190
/* Same as above, but also update availability of target register in
2191
   TOP judging by TO_LV_SET and FROM_LV_SET.  */
2192
void
2193
av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2194
                       regset from_lv_set, insn_t insn)
2195
{
2196
  expr_t expr1;
2197
  av_set_iterator i;
2198
  av_set_t *to_tailp, in_both_set = NULL;
2199
 
2200
  /* Delete from TOP all exprs that are present in FROMP.  */
2201
  FOR_EACH_EXPR_1 (expr1, i, top)
2202
    {
2203
      expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2204
 
2205
      if (expr2)
2206
        {
2207
          /* It may be that the expressions have different destination
2208
             registers, in which case we need to check liveness here.  */
2209
          if (EXPR_SEPARABLE_P (expr1))
2210
            {
2211
              int regno1 = (REG_P (EXPR_LHS (expr1))
2212
                            ? (int) expr_dest_regno (expr1) : -1);
2213
              int regno2 = (REG_P (EXPR_LHS (expr2))
2214
                            ? (int) expr_dest_regno (expr2) : -1);
2215
 
2216
              /* ??? We don't have a way to check restrictions for
2217
               *other* register on the current path; we did it only
2218
               for the current target register.  Give up.  */
2219
              if (regno1 != regno2)
2220
                EXPR_TARGET_AVAILABLE (expr2) = -1;
2221
            }
2222
          else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2223
            EXPR_TARGET_AVAILABLE (expr2) = -1;
2224
 
2225
          merge_expr (expr2, expr1, insn);
2226
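          /* Stash the merged expr so it skips the liveness checks applied
             to the remaining elements below.  */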
          av_set_add_nocopy (&in_both_set, expr2);
2227
          av_set_iter_remove (&i);
2228
        }
2229
      else
2230
        /* EXPR1 is present in TOP, but not in FROMP.  Check it on
2231
           FROM_LV_SET.  */
2232
        set_unavailable_target_for_expr (expr1, from_lv_set);
2233
    }
2234
  to_tailp = i.lp;
2235
 
2236
  /* These expressions are not present in TOP.  Check liveness
2237
     restrictions on TO_LV_SET.  */
2238
  FOR_EACH_EXPR (expr1, i, *fromp)
2239
    set_unavailable_target_for_expr (expr1, to_lv_set);
2240
 
2241
  join_distinct_sets (i.lp, &in_both_set);
2242
  join_distinct_sets (to_tailp, fromp);
2243
}
2244
 
2245
/* Clear av_set pointed to by SETP.  */
2246
void
2247
av_set_clear (av_set_t *setp)
2248
{
2249
  expr_t expr;
2250
  av_set_iterator i;
2251
 
2252
  FOR_EACH_EXPR_1 (expr, i, setp)
2253
    av_set_iter_remove (&i);
2254
 
2255
  gcc_assert (*setp == NULL);
2256
}
2257
 
2258
/* Leave only one non-speculative element in the SETP.  */
2259
void
2260
av_set_leave_one_nonspec (av_set_t *setp)
2261
{
2262
  expr_t expr;
2263
  av_set_iterator i;
2264
  bool has_one_nonspec = false;
2265
 
2266
  /* Keep all speculative exprs, and leave one non-speculative
2267
     (the first one).  */
2268
  FOR_EACH_EXPR_1 (expr, i, setp)
2269
    {
2270
      if (!EXPR_SPEC_DONE_DS (expr))
2271
        {
2272
          if (has_one_nonspec)
2273
            av_set_iter_remove (&i);
2274
          else
2275
            has_one_nonspec = true;
2276
        }
2277
    }
2278
}
2279
 
2280
/* Return the N'th element of the SET.  */
2281
expr_t
2282
av_set_element (av_set_t set, int n)
2283
{
2284
  expr_t expr;
2285
  av_set_iterator i;
2286
 
2287
  FOR_EACH_EXPR (expr, i, set)
2288
    if (n-- == 0)
2289
      return expr;
2290
 
2291
  gcc_unreachable ();
2292
  return NULL;
2293
}
2294
 
2295
/* Deletes all expressions from AVP that are conditional branches (IFs).  */
2296
void
2297
av_set_substract_cond_branches (av_set_t *avp)
2298
{
2299
  av_set_iterator i;
2300
  expr_t expr;
2301
 
2302
  FOR_EACH_EXPR_1 (expr, i, avp)
2303
    if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2304
      av_set_iter_remove (&i);
2305
}
2306
 
2307
/* Multiply the usefulness attribute of each member of av-set AV by
2308
   the value PROB / ALL_PROB.  */
2309
void
2310
av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2311
{
2312
  av_set_iterator i;
2313
  expr_t expr;
2314
 
2315
  FOR_EACH_EXPR (expr, i, av)
2316
    EXPR_USEFULNESS (expr) = (all_prob
2317
                              ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2318
                              : 0);
2319
}
2320
 
2321
/* Leave in AVP only those expressions that are also
2322
   present in AV.  */
2323
void
2324
av_set_intersect (av_set_t *avp, av_set_t av)
2325
{
2326
  av_set_iterator i;
2327
  expr_t expr;
2328
 
2329
  FOR_EACH_EXPR_1 (expr, i, avp)
2330
    if (av_set_lookup (av, EXPR_VINSN (expr)) == NULL)
2331
      av_set_iter_remove (&i);
2332
}
2333
 
2334
 
2335
 
2336
/* Dependence hooks to initialize insn data.  */
2337
 
2338
/* This is used in hooks callable from dependence analysis when initializing
2339
   an instruction's data.  */
2340
static struct
2341
{
2342
  /* Where the dependence was found (lhs/rhs).  */
2343
  deps_where_t where;
2344
 
2345
  /* The actual data object to initialize.  */
2346
  idata_t id;
2347
 
2348
  /* True when the insn should not be made clonable.  */
2349
  bool force_unique_p;
2350
 
2351
  /* True when insn should be treated as of type USE, i.e. never renamed.  */
2352
  bool force_use_p;
2353
} deps_init_id_data;
2354
 
2355
 
2356
/* Setup ID for INSN.  FORCE_UNIQUE_P is true when INSN should not be
2357
   clonable.  */
2358
static void
2359
setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2360
{
2361
  int type;
2362
 
2363
  /* Determine whether INSN could be cloned and return appropriate vinsn type.
2364
     Clonable insns which can be separated into lhs and rhs have type SET.
2365
     Other clonable insns have type USE.  */
2366
  type = GET_CODE (insn);
2367
 
2368
  /* Only regular insns can be cloned.  */
2369
  if (type == INSN && !force_unique_p)
2370
    type = SET;
2371
  else if (type == JUMP_INSN && simplejump_p (insn))
2372
    type = PC;
2373
  else if (type == DEBUG_INSN)
2374
    type = !force_unique_p ? USE : INSN;
2375
 
2376
  IDATA_TYPE (id) = type;
2377
  IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2378
  IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2379
  IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2380
}
2381
 
2382
/* Start initializing insn data.  */
2383
static void
2384
deps_init_id_start_insn (insn_t insn)
2385
{
2386
  gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2387
 
2388
  setup_id_for_insn (deps_init_id_data.id, insn,
2389
                     deps_init_id_data.force_unique_p);
2390
  deps_init_id_data.where = DEPS_IN_INSN;
2391
}
2392
 
2393
/* Start initializing lhs data.  */
2394
static void
2395
deps_init_id_start_lhs (rtx lhs)
2396
{
2397
  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2398
  gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2399
 
2400
  if (IDATA_TYPE (deps_init_id_data.id) == SET)
2401
    {
2402
      IDATA_LHS (deps_init_id_data.id) = lhs;
2403
      deps_init_id_data.where = DEPS_IN_LHS;
2404
    }
2405
}
2406
 
2407
/* Finish initializing lhs data.  */
2408
static void
2409
deps_init_id_finish_lhs (void)
2410
{
2411
  deps_init_id_data.where = DEPS_IN_INSN;
2412
}
2413
 
2414
/* Note a set of REGNO.  */
2415
static void
2416
deps_init_id_note_reg_set (int regno)
2417
{
2418
  haifa_note_reg_set (regno);
2419
 
2420
  if (deps_init_id_data.where == DEPS_IN_RHS)
2421
    deps_init_id_data.force_use_p = true;
2422
 
2423
  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2424
    SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2425
 
2426
#ifdef STACK_REGS
2427
  /* Make instructions that set stack registers to be ineligible for
2428
     renaming to avoid issues with find_used_regs.  */
2429
  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2430
    deps_init_id_data.force_use_p = true;
2431
#endif
2432
}
2433
 
2434
/* Note a clobber of REGNO.  */
2435
static void
2436
deps_init_id_note_reg_clobber (int regno)
2437
{
2438
  haifa_note_reg_clobber (regno);
2439
 
2440
  if (deps_init_id_data.where == DEPS_IN_RHS)
2441
    deps_init_id_data.force_use_p = true;
2442
 
2443
  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2444
    SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2445
}
2446
 
2447
/* Note a use of REGNO.  */
2448
static void
2449
deps_init_id_note_reg_use (int regno)
2450
{
2451
  haifa_note_reg_use (regno);
2452
 
2453
  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2454
    SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2455
}
2456
 
2457
/* Start initializing rhs data.  */
2458
static void
2459
deps_init_id_start_rhs (rtx rhs)
2460
{
2461
  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2462
 
2463
  /* And there was no sel_deps_reset_to_insn ().  */
2464
  if (IDATA_LHS (deps_init_id_data.id) != NULL)
2465
    {
2466
      IDATA_RHS (deps_init_id_data.id) = rhs;
2467
      deps_init_id_data.where = DEPS_IN_RHS;
2468
    }
2469
}
2470
 
2471
/* Finish initializing rhs data.  */
2472
static void
2473
deps_init_id_finish_rhs (void)
2474
{
2475
  gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2476
              || deps_init_id_data.where == DEPS_IN_INSN);
2477
  deps_init_id_data.where = DEPS_IN_INSN;
2478
}
2479
 
2480
/* Finish initializing insn data.  */
2481
static void
2482
deps_init_id_finish_insn (void)
2483
{
2484
  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2485
 
2486
  if (IDATA_TYPE (deps_init_id_data.id) == SET)
2487
    {
2488
      rtx lhs = IDATA_LHS (deps_init_id_data.id);
2489
      rtx rhs = IDATA_RHS (deps_init_id_data.id);
2490
 
2491
      if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2492
          || deps_init_id_data.force_use_p)
2493
        {
2494
          /* This should be a USE, as we don't want to schedule its RHS
2495
             separately.  However, we still want to have them recorded
2496
             for the purposes of substitution.  That's why we don't
2497
             simply call downgrade_to_use () here.  */
2498
          gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2499
          gcc_assert (!lhs == !rhs);
2500
 
2501
          IDATA_TYPE (deps_init_id_data.id) = USE;
2502
        }
2503
    }
2504
 
2505
  deps_init_id_data.where = DEPS_IN_NOWHERE;
2506
}
2507
 
2508
/* This is dependence info used for initializing insn's data.  */
2509
static struct sched_deps_info_def deps_init_id_sched_deps_info;
2510
 
2511
/* This initializes most of the static part of the above structure.  */
2512
static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2513
  {
2514
    NULL,
2515
 
2516
    deps_init_id_start_insn,
2517
    deps_init_id_finish_insn,
2518
    deps_init_id_start_lhs,
2519
    deps_init_id_finish_lhs,
2520
    deps_init_id_start_rhs,
2521
    deps_init_id_finish_rhs,
2522
    deps_init_id_note_reg_set,
2523
    deps_init_id_note_reg_clobber,
2524
    deps_init_id_note_reg_use,
2525
    NULL, /* note_mem_dep */
2526
    NULL, /* note_dep */
2527
 
2528
    0, /* use_cselib */
2529
    0, /* use_deps_list */
2530
 
2531
  };
2532
 
2533
/* Initialize INSN's lhs and rhs in ID.  When FORCE_UNIQUE_P is true,
2534
   we don't actually need information about lhs and rhs.  */
2535
static void
2536
setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2537
{
2538
  rtx pat = PATTERN (insn);
2539
 
2540
  if (NONJUMP_INSN_P (insn)
2541
      && GET_CODE (pat) == SET
2542
      && !force_unique_p)
2543
    {
2544
      IDATA_RHS (id) = SET_SRC (pat);
2545
      IDATA_LHS (id) = SET_DEST (pat);
2546
    }
2547
  else
2548
    IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2549
}
2550
 
2551
/* Possibly downgrade INSN to USE.  */
2552
static void
2553
maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2554
{
2555
  bool must_be_use = false;
2556
  unsigned uid = INSN_UID (insn);
2557
  df_ref *rec;
2558
  rtx lhs = IDATA_LHS (id);
2559
  rtx rhs = IDATA_RHS (id);
2560
 
2561
  /* We downgrade only SETs.  */
2562
  if (IDATA_TYPE (id) != SET)
2563
    return;
2564
 
2565
  if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2566
    {
2567
      IDATA_TYPE (id) = USE;
2568
      return;
2569
    }
2570
 
2571
  for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2572
    {
2573
      df_ref def = *rec;
2574
 
2575
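      /* An auto-modified (pre/post modify) location that is also mentioned
         in the RHS prevents scheduling the RHS separately; force a USE.  */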
      if (DF_REF_INSN (def)
2576
          && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2577
          && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2578
        {
2579
          must_be_use = true;
2580
          break;
2581
        }
2582
 
2583
#ifdef STACK_REGS
2584
      /* Make instructions that set stack registers to be ineligible for
2585
         renaming to avoid issues with find_used_regs.  */
2586
      if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2587
        {
2588
          must_be_use = true;
2589
          break;
2590
        }
2591
#endif
2592
    }
2593
 
2594
  if (must_be_use)
2595
    IDATA_TYPE (id) = USE;
2596
}
2597
 
2598
/* Setup register sets describing INSN in ID.  */
2599
static void
2600
setup_id_reg_sets (idata_t id, insn_t insn)
2601
{
2602
  unsigned uid = INSN_UID (insn);
2603
  df_ref *rec;
2604
  regset tmp = get_clear_regset_from_pool ();
2605
 
2606
  for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2607
    {
2608
      df_ref def = *rec;
2609
      unsigned int regno = DF_REF_REGNO (def);
2610
 
2611
      /* Post modifies are treated like clobbers by sched-deps.c.  */
2612
      if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2613
                                     | DF_REF_PRE_POST_MODIFY)))
2614
        SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2615
      else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2616
        {
2617
          SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2618
 
2619
#ifdef STACK_REGS
2620
          /* For stack registers, treat writes to them as writes
2621
             to the first one to be consistent with sched-deps.c.  */
2622
          if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2623
            SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2624
#endif
2625
        }
2626
      /* Mark special refs that generate read/write def pair.  */
2627
      if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2628
          || regno == STACK_POINTER_REGNUM)
2629
        bitmap_set_bit (tmp, regno);
2630
    }
2631
 
2632
  for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2633
    {
2634
      df_ref use = *rec;
2635
      unsigned int regno = DF_REF_REGNO (use);
2636
 
2637
      /* When these refs are met for the first time, skip them, as
2638
         these uses are just counterparts of some defs.  */
2639
      if (bitmap_bit_p (tmp, regno))
2640
        bitmap_clear_bit (tmp, regno);
2641
      else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2642
        {
2643
          SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2644
 
2645
#ifdef STACK_REGS
2646
          /* For stack registers, treat reads from them as reads from
2647
             the first one to be consistent with sched-deps.c.  */
2648
          if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2649
            SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2650
#endif
2651
        }
2652
    }
2653
 
2654
  return_regset_to_pool (tmp);
2655
}
2656
 
2657
/* Initialize instruction data for INSN in ID using DF's data.  */
2658
static void
2659
init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2660
{
2661
  gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2662
 
2663
  setup_id_for_insn (id, insn, force_unique_p);
2664
  setup_id_lhs_rhs (id, insn, force_unique_p);
2665
 
2666
  if (INSN_NOP_P (insn))
2667
    return;
2668
 
2669
  maybe_downgrade_id_to_use (id, insn);
2670
  setup_id_reg_sets (id, insn);
2671
}
2672
 
2673
/* Initialize instruction data for INSN in ID.  */
2674
static void
2675
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2676
{
2677
  struct deps_desc _dc, *dc = &_dc;
2678
 
2679
  deps_init_id_data.where = DEPS_IN_NOWHERE;
2680
  deps_init_id_data.id = id;
2681
  deps_init_id_data.force_unique_p = force_unique_p;
2682
  deps_init_id_data.force_use_p = false;
2683
 
2684
  init_deps (dc, false);
2685
 
2686
  memcpy (&deps_init_id_sched_deps_info,
2687
          &const_deps_init_id_sched_deps_info,
2688
          sizeof (deps_init_id_sched_deps_info));
2689
 
2690
  if (spec_info != NULL)
2691
    deps_init_id_sched_deps_info.generate_spec_deps = 1;
2692
 
2693
  sched_deps_info = &deps_init_id_sched_deps_info;
2694
 
2695
  deps_analyze_insn (dc, insn);
2696
 
2697
  free_deps (dc);
2698
 
2699
  deps_init_id_data.id = NULL;
2700
}
2701
 
2702
 
2703
 
2704
/* Implement hooks for collecting fundamental insn properties, such as whether
2705
   the insn is an ASM or is within a SCHED_GROUP.  */
2706
 
2707
/* True when a "one-time init" data for INSN was already inited.  */
2708
static bool
2709
first_time_insn_init (insn_t insn)
2710
{
2711
  return INSN_LIVE (insn) == NULL;
2712
}
2713
 
2714
/* Hash an entry in a transformed_insns hashtable.  */
2715
static hashval_t
2716
hash_transformed_insns (const void *p)
2717
{
2718
  return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2719
}
2720
 
2721
/* Compare the entries in a transformed_insns hashtable.  */
2722
static int
2723
eq_transformed_insns (const void *p, const void *q)
2724
{
2725
  rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2726
  rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2727
 
2728
  if (INSN_UID (i1) == INSN_UID (i2))
2729
    return 1;
2730
  return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2731
}
2732
 
2733
/* Free an entry in a transformed_insns hashtable.  */
2734
static void
2735
free_transformed_insns (void *p)
2736
{
2737
  struct transformed_insns *pti = (struct transformed_insns *) p;
2738
 
2739
  vinsn_detach (pti->vinsn_old);
2740
  vinsn_detach (pti->vinsn_new);
2741
  free (pti);
2742
}
2743
 
2744
/* Init the s_i_d data for INSN which should be inited just once, when
2745
   we first see the insn.  */
2746
static void
2747
init_first_time_insn_data (insn_t insn)
2748
{
2749
  /* This should not be set if this is the first time we init data for
2750
     insn.  */
2751
  gcc_assert (first_time_insn_init (insn));
2752
 
2753
  /* These are needed for nops too.  */
2754
  INSN_LIVE (insn) = get_regset_from_pool ();
2755
  INSN_LIVE_VALID_P (insn) = false;
2756
 
2757
  if (!INSN_NOP_P (insn))
2758
    {
2759
      INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2760
      INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2761
      INSN_TRANSFORMED_INSNS (insn)
2762
        = htab_create (16, hash_transformed_insns,
2763
                       eq_transformed_insns, free_transformed_insns);
2764
      init_deps (&INSN_DEPS_CONTEXT (insn), true);
2765
    }
2766
}
2767
 
2768
/* Free almost all of the above data for an already-scheduled INSN.
2769
   Used for extra-large basic blocks.  */
2770
void
2771
free_data_for_scheduled_insn (insn_t insn)
2772
{
2773
  gcc_assert (! first_time_insn_init (insn));
2774
 
2775
  if (! INSN_ANALYZED_DEPS (insn))
2776
    return;
2777
 
2778
  BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2779
  BITMAP_FREE (INSN_FOUND_DEPS (insn));
2780
  htab_delete (INSN_TRANSFORMED_INSNS (insn));
2781
 
2782
  /* This is allocated only for bookkeeping insns.  */
2783
  if (INSN_ORIGINATORS (insn))
2784
    BITMAP_FREE (INSN_ORIGINATORS (insn));
2785
  free_deps (&INSN_DEPS_CONTEXT (insn));
2786
 
2787
  INSN_ANALYZED_DEPS (insn) = NULL;
2788
 
2789
  /* Clear the readonly flag so we would ICE when trying to recalculate
2790
     the deps context (as we believe that it should not happen).  */
2791
  (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2792
}
2793
 
2794
/* Free the same data as above for INSN.  */
2795
static void
2796
free_first_time_insn_data (insn_t insn)
2797
{
2798
  gcc_assert (! first_time_insn_init (insn));
2799
 
2800
  free_data_for_scheduled_insn (insn);
2801
  return_regset_to_pool (INSN_LIVE (insn));
2802
  INSN_LIVE (insn) = NULL;
2803
  INSN_LIVE_VALID_P (insn) = false;
2804
}
2805
 
2806
/* Initialize region-scope data structures for basic blocks.  */
2807
static void
2808
init_global_and_expr_for_bb (basic_block bb)
2809
{
2810
  if (sel_bb_empty_p (bb))
2811
    return;
2812
 
2813
  invalidate_av_set (bb);
2814
}
2815
 
2816
/* Data for global dependency analysis (to initialize CANT_MOVE and
2817
   SCHED_GROUP_P).  */
2818
static struct
2819
{
2820
  /* Previous insn.  */
2821
  insn_t prev_insn;
2822
} init_global_data;
2823
 
2824
/* Determine if INSN is in the sched_group, is an asm or should not be
2825
   cloned.  After that initialize its expr.  */
2826
static void
2827
init_global_and_expr_for_insn (insn_t insn)
2828
{
2829
  if (LABEL_P (insn))
2830
    return;
2831
 
2832
  if (NOTE_INSN_BASIC_BLOCK_P (insn))
2833
    {
2834
      init_global_data.prev_insn = NULL_RTX;
2835
      return;
2836
    }
2837
 
2838
  gcc_assert (INSN_P (insn));
2839
 
2840
  if (SCHED_GROUP_P (insn))
2841
    /* Setup a sched_group.  */
2842
    {
2843
      insn_t prev_insn = init_global_data.prev_insn;
2844
 
2845
      if (prev_insn)
2846
        INSN_SCHED_NEXT (prev_insn) = insn;
2847
 
2848
      init_global_data.prev_insn = insn;
2849
    }
2850
  else
2851
    init_global_data.prev_insn = NULL_RTX;
2852
 
2853
  if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2854
      || asm_noperands (PATTERN (insn)) >= 0)
2855
    /* Mark INSN as an asm.  */
2856
    INSN_ASM_P (insn) = true;
2857
 
2858
  {
2859
    bool force_unique_p;
2860
    ds_t spec_done_ds;
2861
 
2862
    /* Certain instructions cannot be cloned.  */
2863
    if (CANT_MOVE (insn)
2864
        || INSN_ASM_P (insn)
2865
        || SCHED_GROUP_P (insn)
2866
        || prologue_epilogue_contains (insn)
2867
        /* Exception handling insns are always unique.  */
2868
        || (flag_non_call_exceptions && can_throw_internal (insn))
2869
        /* TRAP_IF, though it has an INSN code, satisfies control_flow_insn_p ().  */
2870
        || control_flow_insn_p (insn))
2871
      force_unique_p = true;
2872
    else
2873
      force_unique_p = false;
2874
 
2875
    if (targetm.sched.get_insn_spec_ds)
2876
      {
2877
        spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2878
        spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2879
      }
2880
    else
2881
      spec_done_ds = 0;
2882
 
2883
    /* Initialize INSN's expr.  */
2884
    init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2885
               REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2886
               spec_done_ds, 0, 0, NULL, true, false, false, false,
2887
               CANT_MOVE (insn));
2888
  }
2889
 
2890
  init_first_time_insn_data (insn);
2891
}
2892
 
2893
/* Scan the region and initialize instruction data for basic blocks BBS.  */
2894
void
2895
sel_init_global_and_expr (bb_vec_t bbs)
2896
{
2897
  /* ??? It would be nice to implement push / pop scheme for sched_infos.  */
2898
  const struct sched_scan_info_def ssi =
2899
    {
2900
      NULL, /* extend_bb */
2901
      init_global_and_expr_for_bb, /* init_bb */
2902
      extend_insn_data, /* extend_insn */
2903
      init_global_and_expr_for_insn /* init_insn */
2904
    };
2905
 
2906
  sched_scan (&ssi, bbs, NULL, NULL, NULL);
2907
}
2908
 
2909
/* Finalize region-scope data structures for basic blocks.  */
2910
static void
2911
finish_global_and_expr_for_bb (basic_block bb)
2912
{
2913
  av_set_clear (&BB_AV_SET (bb));
2914
  BB_AV_LEVEL (bb) = 0;
2915
}
2916
 
2917
/* Finalize INSN's data.  */
2918
static void
2919
finish_global_and_expr_insn (insn_t insn)
2920
{
2921
  if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
2922
    return;
2923
 
2924
  gcc_assert (INSN_P (insn));
2925
 
2926
  if (INSN_LUID (insn) > 0)
2927
    {
2928
      free_first_time_insn_data (insn);
2929
      INSN_WS_LEVEL (insn) = 0;
2930
      CANT_MOVE (insn) = 0;
2931
 
2932
      /* We can no longer assert this, as vinsns of this insn could be
2933
         easily live in other insns' caches.  This should be changed to
2934
         a counter-like approach among all vinsns.  */
2935
      gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
2936
      clear_expr (INSN_EXPR (insn));
2937
    }
2938
}
2939
 
2940
/* Finalize per instruction data for the whole region.  */
2941
void
2942
sel_finish_global_and_expr (void)
2943
{
2944
  {
2945
    bb_vec_t bbs;
2946
    int i;
2947
 
2948
    bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
2949
 
2950
    for (i = 0; i < current_nr_blocks; i++)
2951
      VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
2952
 
2953
    /* Clear AV_SETs and INSN_EXPRs.  */
2954
    {
2955
      const struct sched_scan_info_def ssi =
2956
        {
2957
          NULL, /* extend_bb */
2958
          finish_global_and_expr_for_bb, /* init_bb */
2959
          NULL, /* extend_insn */
2960
          finish_global_and_expr_insn /* init_insn */
2961
        };
2962
 
2963
      sched_scan (&ssi, bbs, NULL, NULL, NULL);
2964
    }
2965
 
2966
    VEC_free (basic_block, heap, bbs);
2967
  }
2968
 
2969
  finish_insns ();
2970
}
2971
 
2972
 
2973
/* In the below hooks, we merely calculate whether or not a dependence
2974
   exists, and in what part of the insn.  However, we will need more data
2975
   when we start caching dependence requests.  */
2976
 
2977
/* Container to hold information for dependency analysis.  */
2978
static struct
2979
{
2980
  deps_t dc;
2981
 
2982
  /* A variable to track which part of rtx we are scanning in
2983
     sched-deps.c: sched_analyze_insn ().  */
2984
  deps_where_t where;
2985
 
2986
  /* Current producer.  */
2987
  insn_t pro;
2988
 
2989
  /* Current consumer.  */
2990
  vinsn_t con;
2991
 
2992
  /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
2993
     X is from { INSN, LHS, RHS }.  */
2994
  ds_t has_dep_p[DEPS_IN_NOWHERE];
2995
} has_dependence_data;
2996
 
2997
/* Start analyzing dependencies of INSN.  */
2998
static void
2999
has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3000
{
3001
  gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3002
 
3003
  has_dependence_data.where = DEPS_IN_INSN;
3004
}
3005
 
3006
/* Finish analyzing dependencies of an insn.  */
3007
static void
3008
has_dependence_finish_insn (void)
3009
{
3010
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3011
 
3012
  has_dependence_data.where = DEPS_IN_NOWHERE;
3013
}
3014
 
3015
/* Start analyzing dependencies of LHS.  */
3016
static void
3017
has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3018
{
3019
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3020
 
3021
  if (VINSN_LHS (has_dependence_data.con) != NULL)
3022
    has_dependence_data.where = DEPS_IN_LHS;
3023
}
3024
 
3025
/* Finish analyzing dependencies of an lhs.  */
3026
static void
3027
has_dependence_finish_lhs (void)
3028
{
3029
  has_dependence_data.where = DEPS_IN_INSN;
3030
}
3031
 
3032
/* Start analyzing dependencies of RHS.  */
3033
static void
3034
has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3035
{
3036
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3037
 
3038
  if (VINSN_RHS (has_dependence_data.con) != NULL)
3039
    has_dependence_data.where = DEPS_IN_RHS;
3040
}
3041
 
3042
/* Finish analyzing dependencies of an rhs.  */
3043
static void
3044
has_dependence_finish_rhs (void)
3045
{
3046
  gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3047
              || has_dependence_data.where == DEPS_IN_INSN);
3048
 
3049
  has_dependence_data.where = DEPS_IN_INSN;
3050
}
3051
 
3052
/* Note a set of REGNO.  */
3053
static void
3054
has_dependence_note_reg_set (int regno)
3055
{
3056
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3057
 
3058
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3059
                                       VINSN_INSN_RTX
3060
                                       (has_dependence_data.con)))
3061
    {
3062
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3063
 
3064
      if (reg_last->sets != NULL
3065
          || reg_last->clobbers != NULL)
3066
        *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3067
 
3068
      if (reg_last->uses)
3069
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3070
    }
3071
}
3072
 
3073
/* Note a clobber of REGNO.  */
3074
static void
3075
has_dependence_note_reg_clobber (int regno)
3076
{
3077
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3078
 
3079
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3080
                                       VINSN_INSN_RTX
3081
                                       (has_dependence_data.con)))
3082
    {
3083
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3084
 
3085
      if (reg_last->sets)
3086
        *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3087
 
3088
      if (reg_last->uses)
3089
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3090
    }
3091
}
3092
 
3093
/* Note a use of REGNO.  */
3094
static void
3095
has_dependence_note_reg_use (int regno)
3096
{
3097
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3098
 
3099
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3100
                                       VINSN_INSN_RTX
3101
                                       (has_dependence_data.con)))
3102
    {
3103
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3104
 
3105
      if (reg_last->sets)
3106
        *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3107
 
3108
      if (reg_last->clobbers)
3109
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3110
 
3111
      /* Handle BE_IN_SPEC.  */
3112
      if (reg_last->uses)
3113
        {
3114
          ds_t pro_spec_checked_ds;
3115
 
3116
          pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3117
          pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3118
 
3119
          if (pro_spec_checked_ds != 0)
3120
            /* Merge BE_IN_SPEC bits into *DSP.  */
3121
            *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3122
                                  NULL_RTX, NULL_RTX);
3123
        }
3124
    }
3125
}
3126
 
3127
/* Note a memory dependence.  */
3128
static void
3129
has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3130
                             rtx pending_mem ATTRIBUTE_UNUSED,
3131
                             insn_t pending_insn ATTRIBUTE_UNUSED,
3132
                             ds_t ds ATTRIBUTE_UNUSED)
3133
{
3134
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3135
                                       VINSN_INSN_RTX (has_dependence_data.con)))
3136
    {
3137
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3138
 
3139
      *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3140
    }
3141
}
3142
 
3143
/* Note a dependence.  */
3144
static void
3145
has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3146
                         ds_t ds ATTRIBUTE_UNUSED)
3147
{
3148
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3149
                                       VINSN_INSN_RTX (has_dependence_data.con)))
3150
    {
3151
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3152
 
3153
      *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3154
    }
3155
}
3156
 
3157
/* Mark the insn as having a hard dependence that prevents speculation.  */
3158
void
3159
sel_mark_hard_insn (rtx insn)
3160
{
3161
  int i;
3162
 
3163
  /* Only work when we're in has_dependence_p mode.
3164
     ??? This is a hack; this should actually be a hook.  */
3165
  if (!has_dependence_data.dc || !has_dependence_data.pro)
3166
    return;
3167
 
3168
  gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3169
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3170
 
3171
  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3172
    has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3173
}
3174
 
3175
/* This structure holds the hooks for the dependency analysis used when
3176
   actually processing dependencies in the scheduler.  */
3177
static struct sched_deps_info_def has_dependence_sched_deps_info;
3178
 
3179
/* This initializes most of the fields of the above structure.  */
3180
static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3181
  {
3182
    NULL,
3183
 
3184
    has_dependence_start_insn,
3185
    has_dependence_finish_insn,
3186
    has_dependence_start_lhs,
3187
    has_dependence_finish_lhs,
3188
    has_dependence_start_rhs,
3189
    has_dependence_finish_rhs,
3190
    has_dependence_note_reg_set,
3191
    has_dependence_note_reg_clobber,
3192
    has_dependence_note_reg_use,
3193
    has_dependence_note_mem_dep,
3194
    has_dependence_note_dep,
3195
 
3196
    0, /* use_cselib */
3197
    0, /* use_deps_list */
3198
 
3199
  };
3200
 
3201
/* Initialize has_dependence_sched_deps_info with extra spec field.  */
3202
static void
3203
setup_has_dependence_sched_deps_info (void)
3204
{
3205
  memcpy (&has_dependence_sched_deps_info,
3206
          &const_has_dependence_sched_deps_info,
3207
          sizeof (has_dependence_sched_deps_info));
3208
 
3209
  if (spec_info != NULL)
3210
    has_dependence_sched_deps_info.generate_spec_deps = 1;
3211
 
3212
  sched_deps_info = &has_dependence_sched_deps_info;
3213
}
3214
 
3215
/* Remove all dependences found and recorded in has_dependence_data array.  */
3216
void
3217
sel_clear_has_dependence (void)
3218
{
3219
  int i;
3220
 
3221
  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3222
    has_dependence_data.has_dep_p[i] = 0;
3223
}
3224
 
3225
/* Return nonzero if EXPR is dependent upon PRED.  Return the pointer
3226
   to the dependence information array in HAS_DEP_PP.  */
3227
ds_t
3228
has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3229
{
3230
  int i;
3231
  ds_t ds;
3232
  struct deps_desc *dc;
3233
 
3234
  if (INSN_SIMPLEJUMP_P (pred))
3235
    /* Unconditional jump is just a transfer of control flow.
3236
       Ignore it.  */
3237
    return false;
3238
 
3239
  dc = &INSN_DEPS_CONTEXT (pred);
3240
 
3241
  /* We init this field lazily.  */
3242
  if (dc->reg_last == NULL)
3243
    init_deps_reg_last (dc);
3244
 
3245
  if (!dc->readonly)
3246
    {
3247
      has_dependence_data.pro = NULL;
3248
      /* Initialize empty dep context with information about PRED.  */
3249
      advance_deps_context (dc, pred);
3250
      dc->readonly = 1;
3251
    }
3252
 
3253
  has_dependence_data.where = DEPS_IN_NOWHERE;
3254
  has_dependence_data.pro = pred;
3255
  has_dependence_data.con = EXPR_VINSN (expr);
3256
  has_dependence_data.dc = dc;
3257
 
3258
  sel_clear_has_dependence ();
3259
 
3260
  /* Now catch all dependencies that would be generated between PRED and
3261
     INSN.  */
3262
  setup_has_dependence_sched_deps_info ();
3263
  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3264
  has_dependence_data.dc = NULL;
3265
 
3266
  /* When a barrier was found, set DEPS_IN_INSN bits.  */
3267
  if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3268
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3269
  else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3270
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3271
 
3272
  /* Do not allow stores to memory to move through checks.  Currently
3273
     we don't move this to sched-deps.c as the check doesn't have
3274
     obvious places to which this dependence can be attached.
3275
     FIXME: this should go to a hook.  */
3276
  if (EXPR_LHS (expr)
3277
      && MEM_P (EXPR_LHS (expr))
3278
      && sel_insn_is_speculation_check (pred))
3279
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3280
 
3281
  *has_dep_pp = has_dependence_data.has_dep_p;
3282
  ds = 0;
3283
  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3284
    ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3285
                        NULL_RTX, NULL_RTX);
3286
 
3287
  return ds;
3288
}
3289
 
3290
 
3291
/* Dependence hooks implementation that checks dependence latency constraints
3292
   on the insns being scheduled.  The entry point for these routines is
3293
   tick_check_p predicate.  */
3294
 
3295
static struct
3296
{
3297
  /* An expr we are currently checking.  */
3298
  expr_t expr;
3299
 
3300
  /* A minimal cycle for its scheduling.  */
3301
  int cycle;
3302
 
3303
  /* Whether we have seen a true dependence while checking.  */
3304
  bool seen_true_dep_p;
3305
} tick_check_data;
3306
 
3307
/* Update minimal scheduling cycle for tick_check_insn given that it depends
3308
   on PRO with status DS and weight DW.  */
3309
static void
3310
tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3311
{
3312
  expr_t con_expr = tick_check_data.expr;
3313
  insn_t con_insn = EXPR_INSN_RTX (con_expr);
3314
 
3315
  if (con_insn != pro_insn)
3316
    {
3317
      enum reg_note dt;
3318
      int tick;
3319
 
3320
      if (/* PROducer was removed from above due to pipelining.  */
3321
          !INSN_IN_STREAM_P (pro_insn)
3322
          /* Or PROducer was originally on the next iteration regarding the
3323
             CONsumer.  */
3324
          || (INSN_SCHED_TIMES (pro_insn)
3325
              - EXPR_SCHED_TIMES (con_expr)) > 1)
3326
        /* Don't count this dependence.  */
3327
        return;
3328
 
3329
      dt = ds_to_dt (ds);
3330
      if (dt == REG_DEP_TRUE)
3331
        tick_check_data.seen_true_dep_p = true;
3332
 
3333
      gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3334
 
3335
      {
3336
        dep_def _dep, *dep = &_dep;
3337
 
3338
        init_dep (dep, pro_insn, con_insn, dt);
3339
 
3340
        tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3341
      }
3342
 
3343
      /* When there are several kinds of dependencies between pro and con,
3344
         only REG_DEP_TRUE should be taken into account.  */
3345
      if (tick > tick_check_data.cycle
3346
          && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3347
        tick_check_data.cycle = tick;
3348
    }
3349
}
3350
 
3351
/* An implementation of note_dep hook.  */
3352
static void
3353
tick_check_note_dep (insn_t pro, ds_t ds)
3354
{
3355
  tick_check_dep_with_dw (pro, ds, 0);
3356
}
3357
 
3358
/* An implementation of note_mem_dep hook.  */
3359
static void
3360
tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3361
{
3362
  dw_t dw;
3363
 
3364
  dw = (ds_to_dt (ds) == REG_DEP_TRUE
3365
        ? estimate_dep_weak (mem1, mem2)
3366
        : 0);
3367
 
3368
  tick_check_dep_with_dw (pro, ds, dw);
3369
}
3370
 
3371
/* This structure contains hooks for dependence analysis used when determining
3372
   whether an insn is ready for scheduling.  */
3373
static struct sched_deps_info_def tick_check_sched_deps_info =
3374
  {
3375
    NULL,
3376
 
3377
    NULL,
3378
    NULL,
3379
    NULL,
3380
    NULL,
3381
    NULL,
3382
    NULL,
3383
    haifa_note_reg_set,
3384
    haifa_note_reg_clobber,
3385
    haifa_note_reg_use,
3386
    tick_check_note_mem_dep,
3387
    tick_check_note_dep,
3388
 
3389
    0, 0, 0
3390
  };
3391
 
3392
/* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3393
   scheduled.  Return 0 if all data from producers in DC is ready.  */
3394
int
3395
tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3396
{
3397
  int cycles_left;
3398
  /* Initialize variables.  */
3399
  tick_check_data.expr = expr;
3400
  tick_check_data.cycle = 0;
3401
  tick_check_data.seen_true_dep_p = false;
3402
  sched_deps_info = &tick_check_sched_deps_info;
3403
 
3404
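  /* Analyze EXPR against DC in read-only mode, so the context itself is
     left unchanged.  */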
  gcc_assert (!dc->readonly);
3405
  dc->readonly = 1;
3406
  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3407
  dc->readonly = 0;
3408
 
3409
  cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3410
 
3411
  return cycles_left >= 0 ? cycles_left : 0;
3412
}
3413
 
3414
 
3415
/* Functions to work with insns.  */
3416
 
3417
/* Returns true if LHS of INSN is the same as DEST of an insn
3418
   being moved.  */
3419
bool
3420
lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3421
{
3422
  rtx lhs = INSN_LHS (insn);
3423
 
3424
  if (lhs == NULL || dest == NULL)
3425
    return false;
3426
 
3427
  return rtx_equal_p (lhs, dest);
3428
}
3429
 
3430
/* Return s_i_d entry of INSN.  Callable from debugger.  */
3431
sel_insn_data_def
3432
insn_sid (insn_t insn)
3433
{
3434
  return *SID (insn);
3435
}
3436
 
3437
/* True when INSN is a speculative check.  We can tell this by looking
3438
   at the data structures of the selective scheduler, not by examining
3439
   the pattern.  */
3440
bool
3441
sel_insn_is_speculation_check (rtx insn)
3442
{
3443
  return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3444
}
3445
 
3446
/* Extracts machine mode MODE and destination location DST_LOC
3447
   for given INSN.  */
3448
void
3449
get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3450
{
3451
  rtx pat = PATTERN (insn);
3452
 
3453
  gcc_assert (dst_loc);
3454
  gcc_assert (GET_CODE (pat) == SET);
3455
 
3456
  *dst_loc = SET_DEST (pat);
3457
 
3458
  gcc_assert (*dst_loc);
3459
  gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3460
 
3461
  if (mode)
3462
    *mode = GET_MODE (*dst_loc);
3463
}
3464
 
3465
/* Returns true when moving through JUMP will result in bookkeeping
3466
   creation.  */
3467
bool
3468
bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3469
{
3470
  insn_t succ;
3471
  succ_iterator si;
3472
 
3473
  FOR_EACH_SUCC (succ, si, jump)
3474
    if (sel_num_cfg_preds_gt_1 (succ))
3475
      return true;
3476
 
3477
  return false;
3478
}
3479
 
3480
/* Return 'true' if INSN is the only one in its basic block.  */
3481
static bool
3482
insn_is_the_only_one_in_bb_p (insn_t insn)
3483
{
3484
  return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3485
}
3486
 
3487
#ifdef ENABLE_CHECKING
3488
/* Check that the region we're scheduling still has at most one
3489
   backedge.  */
3490
static void
3491
verify_backedges (void)
3492
{
3493
  if (pipelining_p)
3494
    {
3495
      int i, n = 0;
3496
      edge e;
3497
      edge_iterator ei;
3498
 
3499
      for (i = 0; i < current_nr_blocks; i++)
3500
        FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3501
          if (in_current_region_p (e->dest)
3502
              && BLOCK_TO_BB (e->dest->index) < i)
3503
            n++;
3504
 
3505
      gcc_assert (n <= 1);
3506
    }
3507
}
3508
#endif
3509
 
3510
 
3511
/* Functions to work with control flow.  */
3512
 
3513
/* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks
3514
   are sorted in topological order (it might have been invalidated by
3515
   redirecting an edge).  */
3516
static void
3517
sel_recompute_toporder (void)
3518
{
3519
  int i, n, rgn;
3520
  int *postorder, n_blocks;
3521
 
3522
  postorder = XALLOCAVEC (int, n_basic_blocks);
3523
  n_blocks = post_order_compute (postorder, false, false);
3524
 
3525
  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3526
  for (n = 0, i = n_blocks - 1; i >= 0; i--)
3527
    if (CONTAINING_RGN (postorder[i]) == rgn)
3528
      {
3529
        BLOCK_TO_BB (postorder[i]) = n;
3530
        BB_TO_BLOCK (n) = postorder[i];
3531
        n++;
3532
      }
3533
 
3534
  /* Assert that we updated info for all blocks.  We may miss some blocks if
3535
     this function is called when redirecting an edge has made a block
3536
     unreachable, but that block has not been deleted yet.  */
3537
  gcc_assert (n == RGN_NR_BLOCKS (rgn));
3538
}
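
/* For illustration: if post_order_compute yields {5, 3, 8, 2} and only
   blocks 2, 8 and 3 belong to RGN, the reverse walk above assigns
   BLOCK_TO_BB (2) == 0, BLOCK_TO_BB (8) == 1, BLOCK_TO_BB (3) == 2,
   i.e. a topological numbering of the region's blocks.  */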
3539
 
3540
/* Tidy the possibly empty block BB.  */
3541
static bool
3542
maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
3543
{
3544
  basic_block succ_bb, pred_bb;
3545
  edge e;
3546
  edge_iterator ei;
3547
  bool rescan_p;
3548
 
3549
  /* Keep empty bb only if this block immediately precedes EXIT and
3550
     has an incoming non-fallthrough edge, or it has no predecessors or
3551
     successors.  Otherwise remove it.  */
3552
  if (!sel_bb_empty_p (bb)
3553
      || (single_succ_p (bb)
3554
          && single_succ (bb) == EXIT_BLOCK_PTR
3555
          && (!single_pred_p (bb)
3556
              || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3557
      || EDGE_COUNT (bb->preds) == 0
3558
      || EDGE_COUNT (bb->succs) == 0)
3559
    return false;
3560
 
3561
  /* Do not attempt to redirect complex edges.  */
3562
  FOR_EACH_EDGE (e, ei, bb->preds)
3563
    if (e->flags & EDGE_COMPLEX)
3564
      return false;
3565
 
3566
  free_data_sets (bb);
3567
 
3568
  /* Do not delete BB if it has more than one successor.
3569
     That can occur when we are moving a jump.  */
3570
  if (!single_succ_p (bb))
3571
    {
3572
      gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3573
      sel_merge_blocks (bb->prev_bb, bb);
3574
      return true;
3575
    }
3576
 
3577
  succ_bb = single_succ (bb);
3578
  rescan_p = true;
3579
  pred_bb = NULL;
3580
 
3581
  /* Redirect all non-fallthru edges to the next bb.  */
3582
  while (rescan_p)
3583
    {
3584
      rescan_p = false;
3585
 
3586
      FOR_EACH_EDGE (e, ei, bb->preds)
3587
        {
3588
          pred_bb = e->src;
3589
 
3590
          if (!(e->flags & EDGE_FALLTHRU))
3591
            {
3592
              recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
3593
              rescan_p = true;
3594
              break;
3595
            }
3596
        }
3597
    }
3598
 
3599
  /* If possible, merge BB with its predecessor.  */
3600
  if (can_merge_blocks_p (bb->prev_bb, bb))
3601
    sel_merge_blocks (bb->prev_bb, bb);
3602
  else
3603
    /* Otherwise this is a block without a fallthru predecessor.
3604
       Just delete it.  */
3605
    {
3606
      gcc_assert (pred_bb != NULL);
3607
 
3608
      if (in_current_region_p (pred_bb))
3609
        move_bb_info (pred_bb, bb);
3610
      remove_empty_bb (bb, true);
3611
    }
3612
 
3613
  if (recompute_toporder_p)
3614
    sel_recompute_toporder ();
3615
 
3616
#ifdef ENABLE_CHECKING
3617
  verify_backedges ();
3618
#endif
3619
 
3620
  return true;
3621
}
3622
 
3623
/* Tidy the control flow after we have removed original insn from
3624
   XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
3625
   is true, also try to optimize control flow on non-empty blocks.  */
3626
bool
3627
tidy_control_flow (basic_block xbb, bool full_tidying)
3628
{
3629
  bool changed = true;
3630
  insn_t first, last;
3631
 
3632
  /* First check whether XBB is empty.  */
3633
  changed = maybe_tidy_empty_bb (xbb, false);
3634
  if (changed || !full_tidying)
3635
    return changed;
3636
 
3637
  /* Check if there is an unnecessary jump left after the insn.  */
3638
  if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
3639
      && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3640
      && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3641
    {
3642
      if (sel_remove_insn (BB_END (xbb), false, false))
3643
        return true;
3644
      tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3645
    }
3646
 
3647
  first = sel_bb_head (xbb);
3648
  last = sel_bb_end (xbb);
3649
  if (MAY_HAVE_DEBUG_INSNS)
3650
    {
3651
      if (first != last && DEBUG_INSN_P (first))
3652
        do
3653
          first = NEXT_INSN (first);
3654
        while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3655
 
3656
      if (first != last && DEBUG_INSN_P (last))
3657
        do
3658
          last = PREV_INSN (last);
3659
        while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3660
    }
3661
  /* Check if, after removing INSN from the stream, an unnecessary jump
3662
     remains in the previous basic block leading to the next basic block.
3663
     If so, remove that jump and redirect the edge to the current
3664
     basic block (where INSN was before deletion).  This way,
3665
     when the NOP is deleted several instructions later together with its
3666
     basic block, we will not get a jump to the next instruction, which
3667
     can be harmful.  */
3668
  if (first == last
3669
      && !sel_bb_empty_p (xbb)
3670
      && INSN_NOP_P (last)
3671
      /* Flow goes fallthru from current block to the next.  */
3672
      && EDGE_COUNT (xbb->succs) == 1
3673
      && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3674
      /* When successor is an EXIT block, it may not be the next block.  */
3675
      && single_succ (xbb) != EXIT_BLOCK_PTR
3676
      /* And unconditional jump in previous basic block leads to
3677
         next basic block of XBB and this jump can be safely removed.  */
3678
      && in_current_region_p (xbb->prev_bb)
3679
      && jump_leads_only_to_bb_p (BB_END (xbb->prev_bb), xbb->next_bb)
3680
      && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3681
      /* Also this jump is not at the scheduling boundary.  */
3682
      && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3683
    {
3684
      bool recompute_toporder_p;
3685
      /* Clear data structures of jump - jump itself will be removed
3686
         by sel_redirect_edge_and_branch.  */
3687
      clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3688
      recompute_toporder_p
3689
        = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3690
 
3691
      gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3692
 
3693
      /* It can turn out that, after removing the unused jump, the basic block
3694
         that contained that jump becomes empty too.  In such a case
3695
         remove it too.  */
3696
      if (sel_bb_empty_p (xbb->prev_bb))
3697
        changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p);
3698
      else if (recompute_toporder_p)
3699
        sel_recompute_toporder ();
3700
    }
3701
 
3702
  return changed;
3703
}
3704
 
3705
/* Purge meaningless empty blocks in the middle of a region.  */
3706
void
3707
purge_empty_blocks (void)
3708
{
3709
  /* Do not attempt to delete preheader.  */
3710
  int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
3711
 
3712
  while (i < current_nr_blocks)
3713
    {
3714
      basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3715
 
3716
      if (maybe_tidy_empty_bb (b, false))
3717
        continue;
3718
 
3719
      i++;
3720
    }
3721
}
3722
 
3723
/* Rip INSN off the insn stream.  When ONLY_DISCONNECT is true,
3724
   do not delete the insn's data, because it will be re-emitted later.
3725
   Return true if we have removed some blocks afterwards.  */
3726
bool
3727
sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3728
{
3729
  basic_block bb = BLOCK_FOR_INSN (insn);
3730
 
3731
  gcc_assert (INSN_IN_STREAM_P (insn));
3732
 
3733
  if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3734
    {
3735
      expr_t expr;
3736
      av_set_iterator i;
3737
 
3738
      /* When we remove a debug insn that is head of a BB, it remains
3739
         in the AV_SET of the block, but it shouldn't.  */
3740
      FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3741
        if (EXPR_INSN_RTX (expr) == insn)
3742
          {
3743
            av_set_iter_remove (&i);
3744
            break;
3745
          }
3746
    }
3747
 
3748
  if (only_disconnect)
3749
    {
3750
      insn_t prev = PREV_INSN (insn);
3751
      insn_t next = NEXT_INSN (insn);
3752
      basic_block bb = BLOCK_FOR_INSN (insn);
3753
 
3754
      NEXT_INSN (prev) = next;
3755
      PREV_INSN (next) = prev;
3756
 
3757
      if (BB_HEAD (bb) == insn)
3758
        {
3759
          gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3760
          BB_HEAD (bb) = prev;
3761
        }
3762
      if (BB_END (bb) == insn)
3763
        BB_END (bb) = prev;
3764
    }
3765
  else
3766
    {
3767
      remove_insn (insn);
3768
      clear_expr (INSN_EXPR (insn));
3769
    }
3770
 
3771
  /* It is necessary to null these fields before calling add_insn ().  */
3772
  PREV_INSN (insn) = NULL_RTX;
3773
  NEXT_INSN (insn) = NULL_RTX;
3774
 
3775
  return tidy_control_flow (bb, full_tidying);
3776
}
3777
 
3778
/* Estimate the number of insns in BB.  */
3779
static int
3780
sel_estimate_number_of_insns (basic_block bb)
3781
{
3782
  int res = 0;
3783
  insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3784
 
3785
  for (; insn != next_tail; insn = NEXT_INSN (insn))
3786
    if (NONDEBUG_INSN_P (insn))
3787
      res++;
3788
 
3789
  return res;
3790
}
3791
 
3792
/* We don't need separate luids for notes or labels.  */
3793
static int
3794
sel_luid_for_non_insn (rtx x)
3795
{
3796
  gcc_assert (NOTE_P (x) || LABEL_P (x));
3797
 
3798
  return -1;
3799
}
3800
 
3801
/* Return seqno of the only predecessor of INSN.  */
3802
static int
3803
get_seqno_of_a_pred (insn_t insn)
3804
{
3805
  int seqno;
3806
 
3807
  gcc_assert (INSN_SIMPLEJUMP_P (insn));
3808
 
3809
  if (!sel_bb_head_p (insn))
3810
    seqno = INSN_SEQNO (PREV_INSN (insn));
3811
  else
3812
    {
3813
      basic_block bb = BLOCK_FOR_INSN (insn);
3814
 
3815
      if (single_pred_p (bb)
3816
          && !in_current_region_p (single_pred (bb)))
3817
        {
3818
          /* We can have preds outside a region when splitting edges
3819
             for pipelining of an outer loop.  Use succ instead.
3820
             There should be only one of them.  */
3821
          insn_t succ = NULL;
3822
          succ_iterator si;
3823
          bool first = true;
3824
 
3825
          gcc_assert (flag_sel_sched_pipelining_outer_loops
3826
                      && current_loop_nest);
3827
          FOR_EACH_SUCC_1 (succ, si, insn,
3828
                           SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
3829
            {
3830
              gcc_assert (first);
3831
              first = false;
3832
            }
3833
 
3834
          gcc_assert (succ != NULL);
3835
          seqno = INSN_SEQNO (succ);
3836
        }
3837
      else
3838
        {
3839
          insn_t *preds;
3840
          int n;
3841
 
3842
          cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
3843
          gcc_assert (n == 1);
3844
 
3845
          seqno = INSN_SEQNO (preds[0]);
3846
 
3847
          free (preds);
3848
        }
3849
    }
3850
 
3851
  return seqno;
3852
}
3853
 
3854
/*  Find the proper seqno for inserting at INSN.  Returns -1 if no predecessors
3855
    with positive seqno exist.  */
3856
int
3857
get_seqno_by_preds (rtx insn)
3858
{
3859
  basic_block bb = BLOCK_FOR_INSN (insn);
3860
  rtx tmp = insn, head = BB_HEAD (bb);
3861
  insn_t *preds;
3862
  int n, i, seqno;
3863
 
3864
  while (tmp != head)
3865
    if (INSN_P (tmp))
3866
      return INSN_SEQNO (tmp);
3867
    else
3868
      tmp = PREV_INSN (tmp);
3869
 
3870
  cfg_preds (bb, &preds, &n);
3871
  for (i = 0, seqno = -1; i < n; i++)
3872
    seqno = MAX (seqno, INSN_SEQNO (preds[i]));
3873
 
3874
  return seqno;
3875
}
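
/* For illustration: if the backward walk above reaches the block head
   without hitting a real insn, the seqno is taken as the maximum over the
   (non-empty) CFG predecessors' seqnos -- e.g. preds with seqnos 3 and 7
   give 7; the initial -1 is returned only when no predecessor provides a
   larger value.  */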
3876
 
3877
 
3878
 
3879
/* Extend pass-scope data structures for basic blocks.  */
3880
void
3881
sel_extend_global_bb_info (void)
3882
{
3883
  VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
3884
                         last_basic_block);
3885
}
3886
 
3887
/* Extend region-scope data structures for basic blocks.  */
3888
static void
3889
extend_region_bb_info (void)
3890
{
3891
  VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
3892
                         last_basic_block);
3893
}
3894
 
3895
/* Extend all data structures to fit for all basic blocks.  */
3896
static void
3897
extend_bb_info (void)
3898
{
3899
  sel_extend_global_bb_info ();
3900
  extend_region_bb_info ();
3901
}
3902
 
3903
/* Finalize pass-scope data structures for basic blocks.  */
3904
void
3905
sel_finish_global_bb_info (void)
3906
{
3907
  VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
3908
}
3909
 
3910
/* Finalize region-scope data structures for basic blocks.  */
3911
static void
3912
finish_region_bb_info (void)
3913
{
3914
  VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
3915
}
3916
 
3917
 
3918
/* Data for each insn in current region.  */
3919
VEC (sel_insn_data_def, heap) *s_i_d = NULL;
3920
 
3921
/* A vector for the insns we've emitted.  */
3922
static insn_vec_t new_insns = NULL;
3923
 
3924
/* Extend data structures for insns from current region.  */
3925
static void
3926
extend_insn_data (void)
3927
{
3928
  int reserve;
3929
 
3930
  sched_extend_target ();
3931
  sched_deps_init (false);
3932
 
3933
  /* Extend data structures for insns from current region.  */
3934
  reserve = (sched_max_luid + 1
3935
             - VEC_length (sel_insn_data_def, s_i_d));
3936
  if (reserve > 0
3937
      && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
3938
    {
3939
      int size;
3940
 
3941
      if (sched_max_luid / 2 > 1024)
3942
        size = sched_max_luid + 1024;
3943
      else
3944
        size = 3 * sched_max_luid / 2;
3945
 
3946
 
3947
      VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
3948
    }
3949
}
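
/* The growth policy above in numbers: with sched_max_luid == 1000, half of
   it does not exceed 1024, so S_I_D is grown to 3 * 1000 / 2 == 1500
   entries; with sched_max_luid == 4000 it is grown to 4000 + 1024 == 5024
   entries, trading a little memory for fewer reallocations on large
   regions.  */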
3950
 
3951
/* Finalize data structures for insns from current region.  */
3952
static void
3953
finish_insns (void)
3954
{
3955
  unsigned i;
3956
 
3957
  /* Clear here all dependence contexts that may have been left over from
3958
     insns that were removed during scheduling.  */
3959
  for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
3960
    {
3961
      sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
3962
 
3963
      if (sid_entry->live)
3964
        return_regset_to_pool (sid_entry->live);
3965
      if (sid_entry->analyzed_deps)
3966
        {
3967
          BITMAP_FREE (sid_entry->analyzed_deps);
3968
          BITMAP_FREE (sid_entry->found_deps);
3969
          htab_delete (sid_entry->transformed_insns);
3970
          free_deps (&sid_entry->deps_context);
3971
        }
3972
      if (EXPR_VINSN (&sid_entry->expr))
3973
        {
3974
          clear_expr (&sid_entry->expr);
3975
 
3976
          /* Also, clear CANT_MOVE bit here, because we really don't want it
3977
             to be passed to the next region.  */
3978
          CANT_MOVE_BY_LUID (i) = 0;
3979
        }
3980
    }
3981
 
3982
  VEC_free (sel_insn_data_def, heap, s_i_d);
3983
}
3984
 
3985
/* A proxy to pass initialization data to init_insn ().  */
3986
static sel_insn_data_def _insn_init_ssid;
3987
static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
3988
 
3989
/* If true create a new vinsn.  Otherwise use the one from EXPR.  */
3990
static bool insn_init_create_new_vinsn_p;
3991
 
3992
/* Set all necessary data for initialization of the new insn[s].  */
3993
static expr_t
3994
set_insn_init (expr_t expr, vinsn_t vi, int seqno)
3995
{
3996
  expr_t x = &insn_init_ssid->expr;
3997
 
3998
  copy_expr_onside (x, expr);
3999
  if (vi != NULL)
4000
    {
4001
      insn_init_create_new_vinsn_p = false;
4002
      change_vinsn_in_expr (x, vi);
4003
    }
4004
  else
4005
    insn_init_create_new_vinsn_p = true;
4006
 
4007
  insn_init_ssid->seqno = seqno;
4008
  return x;
4009
}
4010
 
4011
/* Init data for INSN.  */
4012
static void
4013
init_insn_data (insn_t insn)
4014
{
4015
  expr_t expr;
4016
  sel_insn_data_t ssid = insn_init_ssid;
4017
 
4018
  /* The fields mentioned below are special and hence are not being
4019
     propagated to the new insns.  */
4020
  gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4021
              && !ssid->after_stall_p && ssid->sched_cycle == 0);
4022
  gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4023
 
4024
  expr = INSN_EXPR (insn);
4025
  copy_expr (expr, &ssid->expr);
4026
  prepare_insn_expr (insn, ssid->seqno);
4027
 
4028
  if (insn_init_create_new_vinsn_p)
4029
    change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4030
 
4031
  if (first_time_insn_init (insn))
4032
    init_first_time_insn_data (insn);
4033
}
4034
 
4035
/* This is used to initialize spurious jumps generated by
4036
   sel_redirect_edge ().  */
4037
static void
4038
init_simplejump_data (insn_t insn)
4039
{
4040
  init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4041
             REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4042
             false, true);
4043
  INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
4044
  init_first_time_insn_data (insn);
4045
}
4046
 
4047
/* Perform deferred initialization of insns.  This is used to process
4048
   a new jump that may be created by redirect_edge.  */
4049
void
4050
sel_init_new_insn (insn_t insn, int flags)
4051
{
4052
  /* We create data structures for bb when the first insn is emitted in it.  */
4053
  if (INSN_P (insn)
4054
      && INSN_IN_STREAM_P (insn)
4055
      && insn_is_the_only_one_in_bb_p (insn))
4056
    {
4057
      extend_bb_info ();
4058
      create_initial_data_sets (BLOCK_FOR_INSN (insn));
4059
    }
4060
 
4061
  if (flags & INSN_INIT_TODO_LUID)
4062
    sched_init_luids (NULL, NULL, NULL, insn);
4063
 
4064
  if (flags & INSN_INIT_TODO_SSID)
4065
    {
4066
      extend_insn_data ();
4067
      init_insn_data (insn);
4068
      clear_expr (&insn_init_ssid->expr);
4069
    }
4070
 
4071
  if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4072
    {
4073
      extend_insn_data ();
4074
      init_simplejump_data (insn);
4075
    }
4076
 
4077
  gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4078
              == CONTAINING_RGN (BB_TO_BLOCK (0)));
4079
}
4080
 
4081
 
4082
/* Functions to init/finish work with lv sets.  */
4083
 
4084
/* Init BB_LV_SET of BB from DF_LR_IN set of BB.  */
4085
static void
4086
init_lv_set (basic_block bb)
4087
{
4088
  gcc_assert (!BB_LV_SET_VALID_P (bb));
4089
 
4090
  BB_LV_SET (bb) = get_regset_from_pool ();
4091
  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4092
  BB_LV_SET_VALID_P (bb) = true;
4093
}
4094
 
4095
/* Copy liveness information to BB from FROM_BB.  */
4096
static void
4097
copy_lv_set_from (basic_block bb, basic_block from_bb)
4098
{
4099
  gcc_assert (!BB_LV_SET_VALID_P (bb));
4100
 
4101
  COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4102
  BB_LV_SET_VALID_P (bb) = true;
4103
}
4104
 
4105
/* Initialize lv sets of all bb headers.  */
4106
void
4107
init_lv_sets (void)
4108
{
4109
  basic_block bb;
4110
 
4111
  /* Initialize LV sets.  */
4112
  FOR_EACH_BB (bb)
4113
    init_lv_set (bb);
4114
 
4115
  /* Don't forget EXIT_BLOCK.  */
4116
  init_lv_set (EXIT_BLOCK_PTR);
4117
}
4118
 
4119
/* Release the lv set of BB.  */
4120
static void
4121
free_lv_set (basic_block bb)
4122
{
4123
  gcc_assert (BB_LV_SET (bb) != NULL);
4124
 
4125
  return_regset_to_pool (BB_LV_SET (bb));
4126
  BB_LV_SET (bb) = NULL;
4127
  BB_LV_SET_VALID_P (bb) = false;
4128
}
4129
 
4130
/* Finalize lv sets of all bb headers.  */
4131
void
4132
free_lv_sets (void)
4133
{
4134
  basic_block bb;
4135
 
4136
  /* Don't forget EXIT_BLOCK.  */
4137
  free_lv_set (EXIT_BLOCK_PTR);
4138
 
4139
  /* Free LV sets.  */
4140
  FOR_EACH_BB (bb)
4141
    if (BB_LV_SET (bb))
4142
      free_lv_set (bb);
4143
}
4144
 
4145
/* Initialize an invalid AV_SET for BB.
4146
   This set will be updated next time compute_av () processes BB.  */
4147
static void
4148
invalidate_av_set (basic_block bb)
4149
{
4150
  gcc_assert (BB_AV_LEVEL (bb) <= 0
4151
              && BB_AV_SET (bb) == NULL);
4152
 
4153
  BB_AV_LEVEL (bb) = -1;
4154
}
4155
 
4156
/* Create initial data sets for BB (they will be invalid).  */
4157
static void
4158
create_initial_data_sets (basic_block bb)
4159
{
4160
  if (BB_LV_SET (bb))
4161
    BB_LV_SET_VALID_P (bb) = false;
4162
  else
4163
    BB_LV_SET (bb) = get_regset_from_pool ();
4164
  invalidate_av_set (bb);
4165
}
4166
 
4167
/* Free av set of BB.  */
4168
static void
4169
free_av_set (basic_block bb)
4170
{
4171
  av_set_clear (&BB_AV_SET (bb));
4172
  BB_AV_LEVEL (bb) = 0;
4173
}
4174
 
4175
/* Free data sets of BB.  */
4176
void
4177
free_data_sets (basic_block bb)
4178
{
4179
  free_lv_set (bb);
4180
  free_av_set (bb);
4181
}
4182
 
4183
/* Exchange lv sets of TO and FROM.  */
4184
static void
4185
exchange_lv_sets (basic_block to, basic_block from)
4186
{
4187
  {
4188
    regset to_lv_set = BB_LV_SET (to);
4189
 
4190
    BB_LV_SET (to) = BB_LV_SET (from);
4191
    BB_LV_SET (from) = to_lv_set;
4192
  }
4193
 
4194
  {
4195
    bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4196
 
4197
    BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4198
    BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4199
  }
4200
}
4201
 
4202
 
4203
/* Exchange av sets of TO and FROM.  */
4204
static void
4205
exchange_av_sets (basic_block to, basic_block from)
4206
{
4207
  {
4208
    av_set_t to_av_set = BB_AV_SET (to);
4209
 
4210
    BB_AV_SET (to) = BB_AV_SET (from);
4211
    BB_AV_SET (from) = to_av_set;
4212
  }
4213
 
4214
  {
4215
    int to_av_level = BB_AV_LEVEL (to);
4216
 
4217
    BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4218
    BB_AV_LEVEL (from) = to_av_level;
4219
  }
4220
}
4221
 
4222
/* Exchange data sets of TO and FROM.  */
4223
void
4224
exchange_data_sets (basic_block to, basic_block from)
4225
{
4226
  exchange_lv_sets (to, from);
4227
  exchange_av_sets (to, from);
4228
}
4229
 
4230
/* Copy data sets of FROM to TO.  */
4231
void
4232
copy_data_sets (basic_block to, basic_block from)
4233
{
4234
  gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4235
  gcc_assert (BB_AV_SET (to) == NULL);
4236
 
4237
  BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4238
  BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4239
 
4240
  if (BB_AV_SET_VALID_P (from))
4241
    {
4242
      BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4243
    }
4244
  if (BB_LV_SET_VALID_P (from))
4245
    {
4246
      gcc_assert (BB_LV_SET (to) != NULL);
4247
      COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4248
    }
4249
}
4250
 
4251
/* Return an av set for INSN, if any.  */
4252
av_set_t
4253
get_av_set (insn_t insn)
4254
{
4255
  av_set_t av_set;
4256
 
4257
  gcc_assert (AV_SET_VALID_P (insn));
4258
 
4259
  if (sel_bb_head_p (insn))
4260
    av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4261
  else
4262
    av_set = NULL;
4263
 
4264
  return av_set;
4265
}
4266
 
4267
/* Implementation of AV_LEVEL () macro.  Return AV_LEVEL () of INSN.  */
4268
int
4269
get_av_level (insn_t insn)
4270
{
4271
  int av_level;
4272
 
4273
  gcc_assert (INSN_P (insn));
4274
 
4275
  if (sel_bb_head_p (insn))
4276
    av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4277
  else
4278
    av_level = INSN_WS_LEVEL (insn);
4279
 
4280
  return av_level;
4281
}
4282
 
4283
 
4284
 
4285
/* Variables to work with control-flow graph.  */
4286
 
4287
/* The basic blocks that have already been processed by sched_data_update (),
4288
   but haven't been passed to sel_add_bb () yet.  */
4289
static VEC (basic_block, heap) *last_added_blocks = NULL;
4290
 
4291
/* A pool for allocating successor infos.  */
4292
static struct
4293
{
4294
  /* A stack for saving succs_info structures.  */
4295
  struct succs_info *stack;
4296
 
4297
  /* Its size.  */
4298
  int size;
4299
 
4300
  /* Top of the stack.  */
4301
  int top;
4302
 
4303
  /* Maximal value of the top.  */
4304
  int max_top;
4305
}  succs_info_pool;
4306
 
4307
/* Functions to work with control-flow graph.  */
4308
 
4309
/* Return the head insn of BB (the first insn after the bb note), if any.  */
4310
insn_t
4311
sel_bb_head (basic_block bb)
4312
{
4313
  insn_t head;
4314
 
4315
  if (bb == EXIT_BLOCK_PTR)
4316
    {
4317
      gcc_assert (exit_insn != NULL_RTX);
4318
      head = exit_insn;
4319
    }
4320
  else
4321
    {
4322
      insn_t note;
4323
 
4324
      note = bb_note (bb);
4325
      head = next_nonnote_insn (note);
4326
 
4327
      if (head && BLOCK_FOR_INSN (head) != bb)
4328
        head = NULL_RTX;
4329
    }
4330
 
4331
  return head;
4332
}
4333
 
4334
/* Return true if INSN is a basic block header.  */
4335
bool
4336
sel_bb_head_p (insn_t insn)
4337
{
4338
  return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4339
}
4340
 
4341
/* Return last insn of BB.  */
4342
insn_t
4343
sel_bb_end (basic_block bb)
4344
{
4345
  if (sel_bb_empty_p (bb))
4346
    return NULL_RTX;
4347
 
4348
  gcc_assert (bb != EXIT_BLOCK_PTR);
4349
 
4350
  return BB_END (bb);
4351
}
4352
 
4353
/* Return true if INSN is the last insn in its basic block.  */
4354
bool
4355
sel_bb_end_p (insn_t insn)
4356
{
4357
  return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4358
}
4359
 
4360
/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
4361
bool
4362
sel_bb_empty_p (basic_block bb)
4363
{
4364
  return sel_bb_head (bb) == NULL;
4365
}
4366
 
4367
/* True when BB belongs to the current scheduling region.  */
4368
bool
4369
in_current_region_p (basic_block bb)
4370
{
4371
  if (bb->index < NUM_FIXED_BLOCKS)
4372
    return false;
4373
 
4374
  return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4375
}
4376
 
4377
/* Return the block which is a fallthru bb of a conditional jump JUMP.  */
4378
basic_block
4379
fallthru_bb_of_jump (rtx jump)
4380
{
4381
  if (!JUMP_P (jump))
4382
    return NULL;
4383
 
4384
  if (any_uncondjump_p (jump))
4385
    return single_succ (BLOCK_FOR_INSN (jump));
4386
 
4387
  if (!any_condjump_p (jump))
4388
    return NULL;
4389
 
4390
  /* A basic block that ends with a conditional jump may still have one successor
4391
     (and be followed by a barrier); we are not interested in that case.  */
4392
  if (single_succ_p (BLOCK_FOR_INSN (jump)))
4393
    return NULL;
4394
 
4395
  return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4396
}
4397
 
4398
/* Remove all notes from BB.  */
4399
static void
4400
init_bb (basic_block bb)
4401
{
4402
  remove_notes (bb_note (bb), BB_END (bb));
4403
  BB_NOTE_LIST (bb) = note_list;
4404
}
4405
 
4406
void
4407
sel_init_bbs (bb_vec_t bbs, basic_block bb)
4408
{
4409
  const struct sched_scan_info_def ssi =
4410
    {
4411
      extend_bb_info, /* extend_bb */
4412
      init_bb, /* init_bb */
4413
      NULL, /* extend_insn */
4414
      NULL /* init_insn */
4415
    };
4416
 
4417
  sched_scan (&ssi, bbs, bb, new_insns, NULL);
4418
}
4419
 
4420
/* Restore notes for the whole region.  */
4421
static void
4422
sel_restore_notes (void)
4423
{
4424
  int bb;
4425
  insn_t insn;
4426
 
4427
  for (bb = 0; bb < current_nr_blocks; bb++)
4428
    {
4429
      basic_block first, last;
4430
 
4431
      first = EBB_FIRST_BB (bb);
4432
      last = EBB_LAST_BB (bb)->next_bb;
4433
 
4434
      do
4435
        {
4436
          note_list = BB_NOTE_LIST (first);
4437
          restore_other_notes (NULL, first);
4438
          BB_NOTE_LIST (first) = NULL_RTX;
4439
 
4440
          FOR_BB_INSNS (first, insn)
4441
            if (NONDEBUG_INSN_P (insn))
4442
              reemit_notes (insn);
4443
 
4444
          first = first->next_bb;
4445
        }
4446
      while (first != last);
4447
    }
4448
}
4449
 
4450
/* Free per-bb data structures.  */
4451
void
4452
sel_finish_bbs (void)
4453
{
4454
  sel_restore_notes ();
4455
 
4456
  /* Remove current loop preheader from this loop.  */
4457
  if (current_loop_nest)
4458
    sel_remove_loop_preheader ();
4459
 
4460
  finish_region_bb_info ();
4461
}
4462
 
4463
/* Return true if INSN has a single successor of type FLAGS.  */
4464
bool
4465
sel_insn_has_single_succ_p (insn_t insn, int flags)
4466
{
4467
  insn_t succ;
4468
  succ_iterator si;
4469
  bool first_p = true;
4470
 
4471
  FOR_EACH_SUCC_1 (succ, si, insn, flags)
4472
    {
4473
      if (first_p)
4474
        first_p = false;
4475
      else
4476
        return false;
4477
    }
4478
 
4479
  return true;
4480
}
4481
 
4482
/* Allocate successor's info.  */
4483
static struct succs_info *
4484
alloc_succs_info (void)
4485
{
4486
  if (succs_info_pool.top == succs_info_pool.max_top)
4487
    {
4488
      int i;
4489
 
4490
      if (++succs_info_pool.max_top >= succs_info_pool.size)
4491
        gcc_unreachable ();
4492
 
4493
      i = ++succs_info_pool.top;
4494
      succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4495
      succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4496
      succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4497
    }
4498
  else
4499
    succs_info_pool.top++;
4500
 
4501
  return &succs_info_pool.stack[succs_info_pool.top];
4502
}
4503
 
4504
/* Free successor's info.  */
4505
void
4506
free_succs_info (struct succs_info * sinfo)
4507
{
4508
  gcc_assert (succs_info_pool.top >= 0
4509
              && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4510
  succs_info_pool.top--;
4511
 
4512
  /* Clear stale info.  */
4513
  VEC_block_remove (rtx, sinfo->succs_ok,
4514
                    0, VEC_length (rtx, sinfo->succs_ok));
4515
  VEC_block_remove (rtx, sinfo->succs_other,
4516
                    0, VEC_length (rtx, sinfo->succs_other));
4517
  VEC_block_remove (int, sinfo->probs_ok,
4518
                    0, VEC_length (int, sinfo->probs_ok));
4519
  sinfo->all_prob = 0;
4520
  sinfo->succs_ok_n = 0;
4521
  sinfo->all_succs_n = 0;
4522
}
4523
 
4524
/* Compute successor info for INSN.  FLAGS are the flags passed
4525
   to the FOR_EACH_SUCC_1 iterator.  */
4526
struct succs_info *
4527
compute_succs_info (insn_t insn, short flags)
4528
{
4529
  succ_iterator si;
4530
  insn_t succ;
4531
  struct succs_info *sinfo = alloc_succs_info ();
4532
 
4533
  /* Traverse *all* successors and decide what to do with each.  */
4534
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4535
    {
4536
      /* FIXME: this doesn't work for skipping to loop exits, as we don't
4537
         perform code motion through inner loops.  */
4538
      short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4539
 
4540
      if (current_flags & flags)
4541
        {
4542
          VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4543
          VEC_safe_push (int, heap, sinfo->probs_ok,
4544
                         /* FIXME: Improve calculation when skipping
4545
                            inner loop to exits.  */
4546
                         (si.bb_end
4547
                          ? si.e1->probability
4548
                          : REG_BR_PROB_BASE));
4549
          sinfo->succs_ok_n++;
4550
        }
4551
      else
4552
        VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4553
 
4554
      /* Compute all_prob.  */
4555
      if (!si.bb_end)
4556
        sinfo->all_prob = REG_BR_PROB_BASE;
4557
      else
4558
        sinfo->all_prob += si.e1->probability;
4559
 
4560
      sinfo->all_succs_n++;
4561
    }
4562
 
4563
  return sinfo;
4564
}
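
/* A worked example: for an insn ending a block with two successor edges of
   probabilities 6000 and 4000 (with REG_BR_PROB_BASE == 10000), where only
   the first edge matches FLAGS, the returned info has succs_ok_n == 1,
   probs_ok == {6000}, all_succs_n == 2 and all_prob == 10000.  */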
4565
 
4566
/* Return the predecessors of BB in PREDS and their number in N.
4567
   Empty blocks are skipped.  SIZE is used to allocate PREDS.  */
4568
static void
4569
cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4570
{
4571
  edge e;
4572
  edge_iterator ei;
4573
 
4574
  gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4575
 
4576
  FOR_EACH_EDGE (e, ei, bb->preds)
4577
    {
4578
      basic_block pred_bb = e->src;
4579
      insn_t bb_end = BB_END (pred_bb);
4580
 
4581
      /* ??? This code is not supposed to walk out of a region.  */
4582
      gcc_assert (in_current_region_p (pred_bb));
4583
 
4584
      if (sel_bb_empty_p (pred_bb))
4585
        cfg_preds_1 (pred_bb, preds, n, size);
4586
      else
4587
        {
4588
          if (*n == *size)
4589
            *preds = XRESIZEVEC (insn_t, *preds,
4590
                                 (*size = 2 * *size + 1));
4591
          (*preds)[(*n)++] = bb_end;
4592
        }
4593
    }
4594
 
4595
  gcc_assert (*n != 0);
4596
}
4597
 
4598
/* Find all predecessors of BB and record them in PREDS and their number
4599
   in N.  Empty blocks are skipped, and only normal (forward in-region)
4600
   edges are processed.  */
4601
static void
4602
cfg_preds (basic_block bb, insn_t **preds, int *n)
4603
{
4604
  int size = 0;
4605
 
4606
  *preds = NULL;
4607
  *n = 0;
4608
  cfg_preds_1 (bb, preds, n, &size);
4609
}
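
/* Note on the allocation above: PREDS starts out empty (SIZE == 0), and
   cfg_preds_1 regrows it with *size = 2 * *size + 1, i.e. 1, 3, 7, 15, ...
   slots as non-empty predecessors are collected.  */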
4610
 
4611
/* Returns true if we are moving INSN through a join point.  */
4612
bool
4613
sel_num_cfg_preds_gt_1 (insn_t insn)
4614
{
4615
  basic_block bb;
4616
 
4617
  if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4618
    return false;
4619
 
4620
  bb = BLOCK_FOR_INSN (insn);
4621
 
4622
  while (1)
4623
    {
4624
      if (EDGE_COUNT (bb->preds) > 1)
4625
        return true;
4626
 
4627
      gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4628
      bb = EDGE_PRED (bb, 0)->src;
4629
 
4630
      if (!sel_bb_empty_p (bb))
4631
        break;
4632
    }
4633
 
4634
  return false;
4635
}
4636
 
4637
/* Returns true when BB should be the end of an ebb.  Adapted from the
4638
   code in sched-ebb.c.  */
4639
bool
4640
bb_ends_ebb_p (basic_block bb)
4641
{
4642
  basic_block next_bb = bb_next_bb (bb);
4643
  edge e;
4644
  edge_iterator ei;
4645
 
4646
  if (next_bb == EXIT_BLOCK_PTR
4647
      || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4648
      || (LABEL_P (BB_HEAD (next_bb))
4649
          /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4650
             Work around that.  */
4651
          && !single_pred_p (next_bb)))
4652
    return true;
4653
 
4654
  if (!in_current_region_p (next_bb))
4655
    return true;
4656
 
4657
  FOR_EACH_EDGE (e, ei, bb->succs)
4658
    if ((e->flags & EDGE_FALLTHRU) != 0)
4659
      {
4660
        gcc_assert (e->dest == next_bb);
4661
 
4662
        return false;
4663
      }
4664
 
4665
  return true;
4666
}
4667
 
4668
/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4669
   successor of INSN.  */
4670
bool
4671
in_same_ebb_p (insn_t insn, insn_t succ)
4672
{
4673
  basic_block ptr = BLOCK_FOR_INSN (insn);
4674
 
4675
  for(;;)
4676
    {
4677
      if (ptr == BLOCK_FOR_INSN (succ))
4678
        return true;
4679
 
4680
      if (bb_ends_ebb_p (ptr))
4681
        return false;
4682
 
4683
      ptr = bb_next_bb (ptr);
4684
    }
4685
 
4686
  gcc_unreachable ();
4687
  return false;
4688
}
4689
 
4690
/* Recomputes the reverse topological order for the function and
4691
   saves it in REV_TOP_ORDER_INDEX.  REV_TOP_ORDER_INDEX_LEN is also
4692
   modified appropriately.  */
4693
static void
4694
recompute_rev_top_order (void)
4695
{
4696
  int *postorder;
4697
  int n_blocks, i;
4698
 
4699
  if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4700
    {
4701
      rev_top_order_index_len = last_basic_block;
4702
      rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4703
                                        rev_top_order_index_len);
4704
    }
4705
 
4706
  postorder = XNEWVEC (int, n_basic_blocks);
4707
 
4708
  n_blocks = post_order_compute (postorder, true, false);
4709
  gcc_assert (n_basic_blocks == n_blocks);
4710
 
4711
  /* Build reverse function: for each basic block with BB->INDEX == K
4712
     rev_top_order_index[K] is its reverse topological sort number.  */
4713
  for (i = 0; i < n_blocks; i++)
4714
    {
4715
      gcc_assert (postorder[i] < rev_top_order_index_len);
4716
      rev_top_order_index[postorder[i]] = i;
4717
    }
4718
 
4719
  free (postorder);
4720
}
4721
 
4722
/* Clear all flags from insns in BB that could spoil its rescheduling.  */
4723
void
4724
clear_outdated_rtx_info (basic_block bb)
4725
{
4726
  rtx insn;
4727
 
4728
  FOR_BB_INSNS (bb, insn)
4729
    if (INSN_P (insn))
4730
      {
4731
        SCHED_GROUP_P (insn) = 0;
4732
        INSN_AFTER_STALL_P (insn) = 0;
4733
        INSN_SCHED_TIMES (insn) = 0;
4734
        EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4735
 
4736
        /* We cannot use the changed caches, as previously we could ignore
4737
           the LHS dependence due to enabled renaming and transform
4738
           the expression, and currently we'll be unable to do this.  */
4739
        htab_empty (INSN_TRANSFORMED_INSNS (insn));
4740
      }
4741
}
4742
 
4743
/* Add BB_NOTE to the pool of available basic block notes.  */
4744
static void
4745
return_bb_to_pool (basic_block bb)
4746
{
4747
  rtx note = bb_note (bb);
4748
 
4749
  gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4750
              && bb->aux == NULL);
4751
 
4752
  /* It turns out that current cfg infrastructure does not support
4753
     reuse of basic blocks.  Don't bother for now.  */
4754
  /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4755
}
4756
 
4757
/* Get a bb_note from pool or return NULL_RTX if pool is empty.  */
4758
static rtx
4759
get_bb_note_from_pool (void)
4760
{
4761
  if (VEC_empty (rtx, bb_note_pool))
4762
    return NULL_RTX;
4763
  else
4764
    {
4765
      rtx note = VEC_pop (rtx, bb_note_pool);
4766
 
4767
      PREV_INSN (note) = NULL_RTX;
4768
      NEXT_INSN (note) = NULL_RTX;
4769
 
4770
      return note;
4771
    }
4772
}
4773
 
4774
/* Free bb_note_pool.  */
4775
void
4776
free_bb_note_pool (void)
4777
{
4778
  VEC_free (rtx, heap, bb_note_pool);
4779
}
4780
 
4781
/* Set up the scheduler pools and the successor structure.  */
4782
void
4783
alloc_sched_pools (void)
4784
{
4785
  int succs_size;
4786
 
4787
  succs_size = MAX_WS + 1;
4788
  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4789
  succs_info_pool.size = succs_size;
4790
  succs_info_pool.top = -1;
4791
  succs_info_pool.max_top = -1;
4792
 
4793
  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4794
                                        sizeof (struct _list_node), 500);
4795
}
4796
 
4797
/* Free the pools.  */
4798
void
4799
free_sched_pools (void)
4800
{
4801
  int i;
4802
 
4803
  free_alloc_pool (sched_lists_pool);
4804
  gcc_assert (succs_info_pool.top == -1);
4805
  for (i = 0; i < succs_info_pool.max_top; i++)
4806
    {
4807
      VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4808
      VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4809
      VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4810
    }
4811
  free (succs_info_pool.stack);
4812
}
4813
 
4814
 
4815
/* Returns a position in RGN where BB can be inserted retaining
4816
   topological order.  */
4817
static int
4818
find_place_to_insert_bb (basic_block bb, int rgn)
4819
{
4820
  bool has_preds_outside_rgn = false;
4821
  edge e;
4822
  edge_iterator ei;
4823
 
4824
  /* Find whether we have preds outside the region.  */
4825
  FOR_EACH_EDGE (e, ei, bb->preds)
4826
    if (!in_current_region_p (e->src))
4827
      {
4828
        has_preds_outside_rgn = true;
4829
        break;
4830
      }
4831
 
4832
  /* Recompute the topological order -- needed when we have more than one pred
4833
     or a pred outside the region.  */
4834
  if (flag_sel_sched_pipelining_outer_loops
4835
      && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
4836
    {
4837
      int i, bbi = bb->index, cur_bbi;
4838
 
4839
      recompute_rev_top_order ();
4840
      for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
4841
        {
4842
          cur_bbi = BB_TO_BLOCK (i);
4843
          if (rev_top_order_index[bbi]
4844
              < rev_top_order_index[cur_bbi])
4845
            break;
4846
        }
4847
 
4848
      /* We skipped the right block, so we increase i.  We accommodate
4849
         it for increasing by step later, so we decrease i.  */
4850
      return (i + 1) - 1;
4851
    }
4852
  else if (has_preds_outside_rgn)
4853
    {
4854
      /* This is the case when we generate an extra empty block
4855
         to serve as region head during pipelining.  */
4856
      e = EDGE_SUCC (bb, 0);
4857
      gcc_assert (EDGE_COUNT (bb->succs) == 1
4858
                  && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
4859
                  && (BLOCK_TO_BB (e->dest->index) == 0));
4860
      return -1;
4861
    }
4862
 
4863
  /* We don't have preds outside the region.  We should have
4864
     a single pred, because the multiple-preds case comes from
4865
     the pipelining of outer loops, and that is handled above.
4866
     Just take the bbi of this single pred.  */
4867
  if (EDGE_COUNT (bb->succs) > 0)
4868
    {
4869
      int pred_bbi;
4870
 
4871
      gcc_assert (EDGE_COUNT (bb->preds) == 1);
4872
 
4873
      pred_bbi = EDGE_PRED (bb, 0)->src->index;
4874
      return BLOCK_TO_BB (pred_bbi);
4875
    }
4876
  else
4877
    /* BB has no successors.  It is safe to put it at the end.  */
4878
    return current_nr_blocks - 1;
4879
}
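
/* For example, in the common single-pred case above: if BB's only
   predecessor sits at topological position 3 in the region
   (BLOCK_TO_BB == 3), the function returns 3, and the caller
   (add_block_to_current_region) then inserts BB right after it.  */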
4880
 
4881
/* Deletes an empty basic block freeing its data.  */
4882
static void
4883
delete_and_free_basic_block (basic_block bb)
4884
{
4885
  gcc_assert (sel_bb_empty_p (bb));
4886
 
4887
  if (BB_LV_SET (bb))
4888
    free_lv_set (bb);
4889
 
4890
  bitmap_clear_bit (blocks_to_reschedule, bb->index);
4891
 
4892
  /* Can't assert av_set properties because we use sel_aremove_bb
4893
     when removing loop preheader from the region.  At the point of
4894
     removing the preheader we already have deallocated sel_region_bb_info.  */
4895
  gcc_assert (BB_LV_SET (bb) == NULL
4896
              && !BB_LV_SET_VALID_P (bb)
4897
              && BB_AV_LEVEL (bb) == 0
4898
              && BB_AV_SET (bb) == NULL);
4899
 
4900
  delete_basic_block (bb);
4901
}
4902
 
4903
/* Add BB to the current region and update the region data.  */
4904
static void
4905
add_block_to_current_region (basic_block bb)
4906
{
4907
  int i, pos, bbi = -2, rgn;
4908
 
4909
  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4910
  bbi = find_place_to_insert_bb (bb, rgn);
4911
  bbi += 1;
4912
  pos = RGN_BLOCKS (rgn) + bbi;
4913
 
4914
  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4915
              && ebb_head[bbi] == pos);
4916
 
4917
  /* Make a place for the new block.  */
4918
  extend_regions ();
4919
 
4920
  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4921
    BLOCK_TO_BB (rgn_bb_table[i])++;
4922
 
4923
  memmove (rgn_bb_table + pos + 1,
4924
           rgn_bb_table + pos,
4925
           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4926
 
4927
  /* Initialize data for BB.  */
4928
  rgn_bb_table[pos] = bb->index;
4929
  BLOCK_TO_BB (bb->index) = bbi;
4930
  CONTAINING_RGN (bb->index) = rgn;
4931
 
4932
  RGN_NR_BLOCKS (rgn)++;
4933
 
4934
  for (i = rgn + 1; i <= nr_regions; i++)
4935
    RGN_BLOCKS (i)++;
4936
}
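
/* A small example of the bookkeeping above: if the region currently holds
   blocks {10, 12, 17} (BLOCK_TO_BB 0, 1, 2) and BB with index 20 is to go
   right after block 12, then BBI == 2, BLOCK_TO_BB (17) is bumped to 3,
   the table becomes {10, 12, 20, 17} with BLOCK_TO_BB (20) == 2, and the
   starting offsets of all following regions are shifted by one.  */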
4937
 
4938
/* Remove BB from the current region and update the region data.  */
4939
static void
4940
remove_bb_from_region (basic_block bb)
4941
{
4942
  int i, pos, bbi = -2, rgn;
4943
 
4944
  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4945
  bbi = BLOCK_TO_BB (bb->index);
4946
  pos = RGN_BLOCKS (rgn) + bbi;
4947
 
4948
  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4949
              && ebb_head[bbi] == pos);
4950
 
4951
  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4952
    BLOCK_TO_BB (rgn_bb_table[i])--;
4953
 
4954
  memmove (rgn_bb_table + pos,
4955
           rgn_bb_table + pos + 1,
4956
           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4957
 
4958
  RGN_NR_BLOCKS (rgn)--;
4959
  for (i = rgn + 1; i <= nr_regions; i++)
4960
    RGN_BLOCKS (i)--;
4961
}
4962
 
4963
/* Add BB to the current region and update all data.  If BB is NULL, add all
4964
   blocks from last_added_blocks vector.  */
4965
static void
4966
sel_add_bb (basic_block bb)
4967
{
4968
  /* Extend luids so that new notes will receive zero luids.  */
4969
  sched_init_luids (NULL, NULL, NULL, NULL);
4970
  sched_init_bbs ();
4971
  sel_init_bbs (last_added_blocks, NULL);
4972
 
4973
  /* When bb is passed explicitly, the vector should contain
4974
     exactly one element, which equals BB; otherwise, the vector
4975
     should not be NULL.  */
4976
  gcc_assert (last_added_blocks != NULL);
4977
 
4978
  if (bb != NULL)
4979
    {
4980
      gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
4981
                  && VEC_index (basic_block,
4982
                                last_added_blocks, 0) == bb);
4983
      add_block_to_current_region (bb);
4984
 
4985
      /* We associate creating/deleting data sets with the first insn
4986
         appearing / disappearing in the bb.  */
4987
      if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
4988
        create_initial_data_sets (bb);
4989
 
4990
      VEC_free (basic_block, heap, last_added_blocks);
4991
    }
4992
  else
4993
    /* BB is NULL - process LAST_ADDED_BLOCKS instead.  */
4994
    {
4995
      int i;
4996
      basic_block temp_bb = NULL;
4997
 
4998
      for (i = 0;
4999
           VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5000
        {
5001
          add_block_to_current_region (bb);
5002
          temp_bb = bb;
5003
        }
5004
 
5005
      /* We need to fetch at least one bb so we know the region
5006
         to update.  */
5007
      gcc_assert (temp_bb != NULL);
5008
      bb = temp_bb;
5009
 
5010
      VEC_free (basic_block, heap, last_added_blocks);
5011
    }
5012
 
5013
  rgn_setup_region (CONTAINING_RGN (bb->index));
5014
}
5015
 
5016
/* Remove BB from the current region and update all data.
5017
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
5018
static void
5019
sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5020
{
5021
  gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5022
 
5023
  remove_bb_from_region (bb);
5024
  return_bb_to_pool (bb);
5025
  bitmap_clear_bit (blocks_to_reschedule, bb->index);
5026
 
5027
  if (remove_from_cfg_p)
5028
    delete_and_free_basic_block (bb);
5029
 
5030
  rgn_setup_region (CONTAINING_RGN (bb->index));
5031
}
5032
 
5033
/* Concatenate info of EMPTY_BB to info of MERGE_BB.  */
5034
static void
5035
move_bb_info (basic_block merge_bb, basic_block empty_bb)
5036
{
5037
  gcc_assert (in_current_region_p (merge_bb));
5038
 
5039
  concat_note_lists (BB_NOTE_LIST (empty_bb),
5040
                     &BB_NOTE_LIST (merge_bb));
5041
  BB_NOTE_LIST (empty_bb) = NULL_RTX;
5042
 
5043
}
5044
 
5045
/* Remove an empty basic block EMPTY_BB.  When MERGE_UP_P is true, we put
5046
   EMPTY_BB's note lists into its predecessor instead of putting them
5047
   into the successor.  When REMOVE_FROM_CFG_P is true, also remove
5048
   the empty block.  */
5049
void
5050
sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
5051
                     bool remove_from_cfg_p)
5052
{
5053
  basic_block merge_bb;
5054
 
5055
  gcc_assert (sel_bb_empty_p (empty_bb));
5056
 
5057
  if (merge_up_p)
5058
    {
5059
      merge_bb = empty_bb->prev_bb;
5060
      gcc_assert (EDGE_COUNT (empty_bb->preds) == 1
5061
                  && EDGE_PRED (empty_bb, 0)->src == merge_bb);
5062
    }
5063
  else
5064
    {
5065
      edge e;
5066
      edge_iterator ei;
5067
 
5068
      merge_bb = bb_next_bb (empty_bb);
5069
 
5070
      /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its
5071
         successor block.  */
5072
      for (ei = ei_start (empty_bb->preds);
5073
           (e = ei_safe_edge (ei)); )
5074
        {
5075
          if (! (e->flags & EDGE_FALLTHRU))
5076
            sel_redirect_edge_and_branch (e, merge_bb);
5077
          else
5078
            ei_next (&ei);
5079
        }
5080
 
5081
      gcc_assert (EDGE_COUNT (empty_bb->succs) == 1
5082
                  && EDGE_SUCC (empty_bb, 0)->dest == merge_bb);
5083
    }
5084
 
5085
  move_bb_info (merge_bb, empty_bb);
5086
  remove_empty_bb (empty_bb, remove_from_cfg_p);
5087
}
5088
 
5089
/* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5090
   the region, but keep it in the CFG.  */
5091
static void
5092
remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5093
{
5094
  /* The block should contain just a note or a label.
5095
     We try to check whether it is unused below.  */
5096
  gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5097
              || LABEL_P (BB_HEAD (empty_bb)));
5098
 
5099
  /* If basic block has predecessors or successors, redirect them.  */
5100
  if (remove_from_cfg_p
5101
      && (EDGE_COUNT (empty_bb->preds) > 0
5102
          || EDGE_COUNT (empty_bb->succs) > 0))
5103
    {
5104
      basic_block pred;
5105
      basic_block succ;
5106
 
5107
      /* We need to init PRED and SUCC before redirecting edges.  */
5108
      if (EDGE_COUNT (empty_bb->preds) > 0)
5109
        {
5110
          edge e;
5111
 
5112
          gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5113
 
5114
          e = EDGE_PRED (empty_bb, 0);
5115
          gcc_assert (e->src == empty_bb->prev_bb
5116
                      && (e->flags & EDGE_FALLTHRU));
5117
 
5118
          pred = empty_bb->prev_bb;
5119
        }
5120
      else
5121
        pred = NULL;
5122
 
5123
      if (EDGE_COUNT (empty_bb->succs) > 0)
5124
        {
5125
          /* We do not check fallthruness here as above, because
5126
             after removing a jump the edge may actually not be fallthru.  */
5127
          gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5128
          succ = EDGE_SUCC (empty_bb, 0)->dest;
5129
        }
5130
      else
5131
        succ = NULL;
5132
 
5133
      if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5134
        {
5135
          edge e = EDGE_PRED (empty_bb, 0);
5136
 
5137
          if (e->flags & EDGE_FALLTHRU)
5138
            redirect_edge_succ_nodup (e, succ);
5139
          else
5140
            sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5141
        }
5142
 
5143
      if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5144
        {
5145
          edge e = EDGE_SUCC (empty_bb, 0);
5146
 
5147
          if (find_edge (pred, e->dest) == NULL)
5148
            redirect_edge_pred (e, pred);
5149
        }
5150
    }
5151
 
5152
  /* Finish removing.  */
5153
  sel_remove_bb (empty_bb, remove_from_cfg_p);
5154
}
5155
 
5156
/* An implementation of create_basic_block hook, which additionally updates
5157
   per-bb data structures.  */
5158
static basic_block
5159
sel_create_basic_block (void *headp, void *endp, basic_block after)
5160
{
5161
  basic_block new_bb;
5162
  insn_t new_bb_note;
5163
 
5164
  gcc_assert (flag_sel_sched_pipelining_outer_loops
5165
              || last_added_blocks == NULL);
5166
 
5167
  new_bb_note = get_bb_note_from_pool ();
5168
 
5169
  if (new_bb_note == NULL_RTX)
5170
    new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5171
  else
5172
    {
5173
      new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5174
                                             new_bb_note, after);
5175
      new_bb->aux = NULL;
5176
    }
5177
 
5178
  VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5179
 
5180
  return new_bb;
5181
}
5182
 
5183
/* Implement sched_init_only_bb ().  */
5184
static void
5185
sel_init_only_bb (basic_block bb, basic_block after)
5186
{
5187
  gcc_assert (after == NULL);
5188
 
5189
  extend_regions ();
5190
  rgn_make_new_region_out_of_new_block (bb);
5191
}
5192
 
5193
/* Update the latch when we've split or merged it from block FROM to block TO.
5194
   This should be checked for all outer loops, too.  */
5195
static void
5196
change_loops_latches (basic_block from, basic_block to)
5197
{
5198
  gcc_assert (from != to);
5199
 
5200
  if (current_loop_nest)
5201
    {
5202
      struct loop *loop;
5203
 
5204
      for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5205
        if (considered_for_pipelining_p (loop) && loop->latch == from)
5206
          {
5207
            gcc_assert (loop == current_loop_nest);
5208
            loop->latch = to;
5209
            gcc_assert (loop_latch_edge (loop));
5210
          }
5211
    }
5212
}
5213
 
5214
/* Splits BB into two basic blocks, adding it to the region and extending
5215
   per-bb data structures.  Returns the newly created bb.  */
5216
static basic_block
5217
sel_split_block (basic_block bb, rtx after)
5218
{
5219
  basic_block new_bb;
5220
  insn_t insn;
5221
 
5222
  new_bb = sched_split_block_1 (bb, after);
5223
  sel_add_bb (new_bb);
5224
 
5225
  /* This should be called after sel_add_bb, because this uses
5226
     CONTAINING_RGN for the new block, which is not yet initialized.
5227
     FIXME: this function may be a no-op now.  */
5228
  change_loops_latches (bb, new_bb);
5229
 
5230
  /* Update ORIG_BB_INDEX for insns moved into the new block.  */
5231
  FOR_BB_INSNS (new_bb, insn)
5232
   if (INSN_P (insn))
5233
     EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5234
 
5235
  if (sel_bb_empty_p (bb))
5236
    {
5237
      gcc_assert (!sel_bb_empty_p (new_bb));
5238
 
5239
      /* NEW_BB has data sets that need to be updated and BB holds
5240
         data sets that should be removed.  Exchange these data sets
5241
         so that we won't lose BB's valid data sets.  */
5242
      exchange_data_sets (new_bb, bb);
5243
      free_data_sets (bb);
5244
    }
5245
 
5246
  if (!sel_bb_empty_p (new_bb)
5247
      && bitmap_bit_p (blocks_to_reschedule, bb->index))
5248
    bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5249
 
5250
  return new_bb;
5251
}
5252
 
5253
/* If BB ends with a jump insn whose ID is bigger than PREV_MAX_UID, return it.
5254
   Otherwise returns NULL.  */
5255
static rtx
5256
check_for_new_jump (basic_block bb, int prev_max_uid)
5257
{
5258
  rtx end;
5259
 
5260
  end = sel_bb_end (bb);
5261
  if (end && INSN_UID (end) >= prev_max_uid)
5262
    return end;
5263
  return NULL;
5264
}
5265
 
5266
/* Look for a new jump either in the FROM block or in the newly created JUMP_BB block.
5267
   New means having UID at least equal to PREV_MAX_UID.  */
5268
static rtx
5269
find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5270
{
5271
  rtx jump;
5272
 
5273
  /* Return immediately if no new insns were emitted.  */
5274
  if (get_max_uid () == prev_max_uid)
5275
    return NULL;
5276
 
5277
  /* Now check both blocks for new jumps.  There will only ever be one.  */
5278
  if ((jump = check_for_new_jump (from, prev_max_uid)))
5279
    return jump;
5280
 
5281
  if (jump_bb != NULL
5282
      && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5283
    return jump;
5284
  return NULL;
5285
}
5286
 
5287
/* Splits E and adds the newly created basic block to the current region.
5288
   Returns this basic block.  */
5289
basic_block
5290
sel_split_edge (edge e)
5291
{
5292
  basic_block new_bb, src, other_bb = NULL;
5293
  int prev_max_uid;
5294
  rtx jump;
5295
 
5296
  src = e->src;
5297
  prev_max_uid = get_max_uid ();
5298
  new_bb = split_edge (e);
5299
 
5300
  if (flag_sel_sched_pipelining_outer_loops
5301
      && current_loop_nest)
5302
    {
5303
      int i;
5304
      basic_block bb;
5305
 
5306
      /* Some of the basic blocks might not have been added to the loop.
5307
         Add them here, until this is fixed in force_fallthru.  */
5308
      for (i = 0;
5309
           VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5310
        if (!bb->loop_father)
5311
          {
5312
            add_bb_to_loop (bb, e->dest->loop_father);
5313
 
5314
            gcc_assert (!other_bb && (new_bb->index != bb->index));
5315
            other_bb = bb;
5316
          }
5317
    }
5318
 
5319
  /* Add all last_added_blocks to the region.  */
5320
  sel_add_bb (NULL);
5321
 
5322
  jump = find_new_jump (src, new_bb, prev_max_uid);
5323
  if (jump)
5324
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5325
 
5326
  /* Put the correct lv set on this block.  */
5327
  if (other_bb && !sel_bb_empty_p (other_bb))
5328
    compute_live (sel_bb_head (other_bb));
5329
 
5330
  return new_bb;
5331
}
5332
 
5333
/* Implement sched_create_empty_bb ().  */
5334
static basic_block
5335
sel_create_empty_bb (basic_block after)
5336
{
5337
  basic_block new_bb;
5338
 
5339
  new_bb = sched_create_empty_bb_1 (after);
5340
 
5341
  /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5342
     later.  */
5343
  gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5344
              && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5345
 
5346
  VEC_free (basic_block, heap, last_added_blocks);
5347
  return new_bb;
5348
}
5349
 
5350
/* Implement sched_create_recovery_block.  ORIG_INSN is where block
5351
   will be split to insert a check.  */
5352
basic_block
5353
sel_create_recovery_block (insn_t orig_insn)
5354
{
5355
  basic_block first_bb, second_bb, recovery_block;
5356
  basic_block before_recovery = NULL;
5357
  rtx jump;
5358
 
5359
  first_bb = BLOCK_FOR_INSN (orig_insn);
5360
  if (sel_bb_end_p (orig_insn))
5361
    {
5362
      /* Avoid introducing an empty block while splitting.  */
5363
      gcc_assert (single_succ_p (first_bb));
5364
      second_bb = single_succ (first_bb);
5365
    }
5366
  else
5367
    second_bb = sched_split_block (first_bb, orig_insn);
5368
 
5369
  recovery_block = sched_create_recovery_block (&before_recovery);
5370
  if (before_recovery)
5371
    copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5372
 
5373
  gcc_assert (sel_bb_empty_p (recovery_block));
5374
  sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5375
  if (current_loops != NULL)
5376
    add_bb_to_loop (recovery_block, first_bb->loop_father);
5377
 
5378
  sel_add_bb (recovery_block);
5379
 
5380
  jump = BB_END (recovery_block);
5381
  gcc_assert (sel_bb_head (recovery_block) == jump);
5382
  sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5383
 
5384
  return recovery_block;
5385
}
5386
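
/* Editor's note (illustrative sketch, not from the original source; names
   below are hypothetical): a recovery block is requested when a speculative
   insn INSN is turned into a check, with the recovery copy of INSN ending
   up in the new block, roughly:

     basic_block rec_bb = sel_create_recovery_block (insn);
     rtx copy = create_copy_of_insn_rtx (insn);
     ... emit COPY into REC_BB before its terminating jump ...

   As the assertions above show, the block is returned containing only that
   jump.  */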
 
5387
/* Merge basic block B into basic block A.  */
5388
void
5389
sel_merge_blocks (basic_block a, basic_block b)
5390
{
5391
  sel_remove_empty_bb (b, true, false);
5392
  merge_blocks (a, b);
5393
 
5394
  change_loops_latches (b, a);
5395
}
5396
 
5397
/* A wrapper for redirect_edge_and_branch_force, which also initializes
5398
   data structures for the possibly created bb and insns.  Any newly
5399
   created bb is added to the current region.  */
5400
void
5401
sel_redirect_edge_and_branch_force (edge e, basic_block to)
5402
{
5403
  basic_block jump_bb, src;
5404
  int prev_max_uid;
5405
  rtx jump;
5406
 
5407
  gcc_assert (!sel_bb_empty_p (e->src));
5408
 
5409
  src = e->src;
5410
  prev_max_uid = get_max_uid ();
5411
  jump_bb = redirect_edge_and_branch_force (e, to);
5412
 
5413
  if (jump_bb != NULL)
5414
    sel_add_bb (jump_bb);
5415
 
5416
  /* As of now, this function cannot be used to break the loop structure,
5417
     so there is nothing to update here.  But check the latch edge to be sure.  */
5418
  if (current_loop_nest
5419
      && pipelining_p)
5420
    gcc_assert (loop_latch_edge (current_loop_nest));
5421
 
5422
  jump = find_new_jump (src, jump_bb, prev_max_uid);
5423
  if (jump)
5424
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5425
}
5426
 
5427
/* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
5428
   redirected edge are in reverse topological order.  */
5429
bool
5430
sel_redirect_edge_and_branch (edge e, basic_block to)
5431
{
5432
  bool latch_edge_p;
5433
  basic_block src;
5434
  int prev_max_uid;
5435
  rtx jump;
5436
  edge redirected;
5437
  bool recompute_toporder_p = false;
5438
 
5439
  latch_edge_p = (pipelining_p
5440
                  && current_loop_nest
5441
                  && e == loop_latch_edge (current_loop_nest));
5442
 
5443
  src = e->src;
5444
  prev_max_uid = get_max_uid ();
5445
 
5446
  redirected = redirect_edge_and_branch (e, to);
5447
 
5448
  gcc_assert (redirected && last_added_blocks == NULL);
5449
 
5450
  /* When we've redirected a latch edge, update the header.  */
5451
  if (latch_edge_p)
5452
    {
5453
      current_loop_nest->header = to;
5454
      gcc_assert (loop_latch_edge (current_loop_nest));
5455
    }
5456
 
5457
  /* In rare situations, the topological relation between the blocks connected
5458
     by the redirected edge can change (see PR42245 for an example).  Update
5459
     block_to_bb/bb_to_block.  */
5460
  if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5461
      && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5462
    recompute_toporder_p = true;
5463
 
5464
  jump = find_new_jump (src, NULL, prev_max_uid);
5465
  if (jump)
5466
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5467
 
5468
  return recompute_toporder_p;
5469
}
5470
 
5471
/* This variable holds the cfg hooks used by the selective scheduler.  */
5472
static struct cfg_hooks sel_cfg_hooks;
5473
 
5474
/* Register sel-sched cfg hooks.  */
5475
void
5476
sel_register_cfg_hooks (void)
5477
{
5478
  sched_split_block = sel_split_block;
5479
 
5480
  orig_cfg_hooks = get_cfg_hooks ();
5481
  sel_cfg_hooks = orig_cfg_hooks;
5482
 
5483
  sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5484
 
5485
  set_cfg_hooks (sel_cfg_hooks);
5486
 
5487
  sched_init_only_bb = sel_init_only_bb;
5488
  sched_split_block = sel_split_block;
5489
  sched_create_empty_bb = sel_create_empty_bb;
5490
}
5491
 
5492
/* Unregister sel-sched cfg hooks.  */
5493
void
5494
sel_unregister_cfg_hooks (void)
5495
{
5496
  sched_create_empty_bb = NULL;
5497
  sched_split_block = NULL;
5498
  sched_init_only_bb = NULL;
5499
 
5500
  set_cfg_hooks (orig_cfg_hooks);
5501
}
5502
 
5503
 
5504
/* Emit an insn rtx based on PATTERN.  If a jump insn is wanted,
5505
   LABEL is where this jump should be directed.  */
5506
rtx
5507
create_insn_rtx_from_pattern (rtx pattern, rtx label)
5508
{
5509
  rtx insn_rtx;
5510
 
5511
  gcc_assert (!INSN_P (pattern));
5512
 
5513
  start_sequence ();
5514
 
5515
  if (label == NULL_RTX)
5516
    insn_rtx = emit_insn (pattern);
5517
  else if (DEBUG_INSN_P (label))
5518
    insn_rtx = emit_debug_insn (pattern);
5519
  else
5520
    {
5521
      insn_rtx = emit_jump_insn (pattern);
5522
      JUMP_LABEL (insn_rtx) = label;
5523
      ++LABEL_NUSES (label);
5524
    }
5525
 
5526
  end_sequence ();
5527
 
5528
  sched_init_luids (NULL, NULL, NULL, NULL);
5529
  sched_extend_target ();
5530
  sched_deps_init (false);
5531
 
5532
  /* Initialize INSN_CODE now.  */
5533
  recog_memoized (insn_rtx);
5534
  return insn_rtx;
5535
}
5536
 
5537
/* Create a new vinsn for INSN_RTX.  FORCE_UNIQUE_P is true when the vinsn
5538
   must not be clonable.  */
5539
vinsn_t
5540
create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5541
{
5542
  gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5543
 
5544
  /* If VINSN_TYPE is not USE, retain its uniqueness.  */
5545
  return vinsn_create (insn_rtx, force_unique_p);
5546
}
5547
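
/* Editor's note (illustrative sketch, not part of the original source):
   a typical way the two helpers above are combined is to synthesize an insn
   outside of the insn stream and then wrap it into a vinsn, e.g.

     rtx insn_rtx = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn)),
                                                  NULL_RTX);
     vinsn_t vi = create_vinsn_from_insn_rtx (insn_rtx, false);

   where INSN is assumed to be an existing non-jump insn;
   create_copy_of_insn_rtx below performs exactly the first of these two
   steps.  */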
 
5548
/* Create a copy of INSN_RTX.  */
5549
rtx
5550
create_copy_of_insn_rtx (rtx insn_rtx)
5551
{
5552
  rtx res;
5553
 
5554
  if (DEBUG_INSN_P (insn_rtx))
5555
    return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5556
                                         insn_rtx);
5557
 
5558
  gcc_assert (NONJUMP_INSN_P (insn_rtx));
5559
 
5560
  res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5561
                                      NULL_RTX);
5562
  return res;
5563
}
5564
 
5565
/* Change vinsn field of EXPR to hold NEW_VINSN.  */
5566
void
5567
change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5568
{
5569
  vinsn_detach (EXPR_VINSN (expr));
5570
 
5571
  EXPR_VINSN (expr) = new_vinsn;
5572
  vinsn_attach (new_vinsn);
5573
}
5574
 
5575
/* Helpers for global init.  */
5576
/* This structure is used so that we can call the existing bundling mechanism
5577
   and calculate insn priorities.  */
5578
static struct haifa_sched_info sched_sel_haifa_sched_info =
5579
{
5580
  NULL, /* init_ready_list */
5581
  NULL, /* can_schedule_ready_p */
5582
  NULL, /* schedule_more_p */
5583
  NULL, /* new_ready */
5584
  NULL, /* rgn_rank */
5585
  sel_print_insn, /* rgn_print_insn */
5586
  contributes_to_priority,
5587
  NULL, /* insn_finishes_block_p */
5588
 
5589
  NULL, NULL,
5590
  NULL, NULL,
5591
  0, 0,
5592
 
5593
  NULL, /* add_remove_insn */
5594
  NULL, /* begin_schedule_ready */
5595
  NULL, /* advance_target_bb */
5596
  SEL_SCHED | NEW_BBS
5597
};
5598
 
5599
/* Setup special insns used in the scheduler.  */
5600
void
5601
setup_nop_and_exit_insns (void)
5602
{
5603
  gcc_assert (nop_pattern == NULL_RTX
5604
              && exit_insn == NULL_RTX);
5605
 
5606
  nop_pattern = gen_nop ();
5607
 
5608
  start_sequence ();
5609
  emit_insn (nop_pattern);
5610
  exit_insn = get_insns ();
5611
  end_sequence ();
5612
  set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5613
}
5614
 
5615
/* Free special insns used in the scheduler.  */
5616
void
5617
free_nop_and_exit_insns (void)
5618
{
5619
  exit_insn = NULL_RTX;
5620
  nop_pattern = NULL_RTX;
5621
}
5622
 
5623
/* Setup a special vinsn used in new insns initialization.  */
5624
void
5625
setup_nop_vinsn (void)
5626
{
5627
  nop_vinsn = vinsn_create (exit_insn, false);
5628
  vinsn_attach (nop_vinsn);
5629
}
5630
 
5631
/* Free a special vinsn used in new insns initialization.  */
5632
void
5633
free_nop_vinsn (void)
5634
{
5635
  gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5636
  vinsn_detach (nop_vinsn);
5637
  nop_vinsn = NULL;
5638
}
5639
 
5640
/* Call a set_sched_flags hook.  */
5641
void
5642
sel_set_sched_flags (void)
5643
{
5644
  /* ??? This means that set_sched_flags was called, and we decided to
5645
     support speculation.  However, set_sched_flags also modifies flags
5646
     on current_sched_info, doing this only at global init.  And we
5647
     sometimes change current_sched_info later.  So set the correct flags again.  */
5648
  if (spec_info && targetm.sched.set_sched_flags)
5649
    targetm.sched.set_sched_flags (spec_info);
5650
}
5651
 
5652
/* Setup pointers to global sched info structures.  */
5653
void
5654
sel_setup_sched_infos (void)
5655
{
5656
  rgn_setup_common_sched_info ();
5657
 
5658
  memcpy (&sel_common_sched_info, common_sched_info,
5659
          sizeof (sel_common_sched_info));
5660
 
5661
  sel_common_sched_info.fix_recovery_cfg = NULL;
5662
  sel_common_sched_info.add_block = NULL;
5663
  sel_common_sched_info.estimate_number_of_insns
5664
    = sel_estimate_number_of_insns;
5665
  sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5666
  sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5667
 
5668
  common_sched_info = &sel_common_sched_info;
5669
 
5670
  current_sched_info = &sched_sel_haifa_sched_info;
5671
  current_sched_info->sched_max_insns_priority =
5672
    get_rgn_sched_max_insns_priority ();
5673
 
5674
  sel_set_sched_flags ();
5675
}
5676
 
5677
 
5678
/* Adds basic block BB to region RGN at the position *BB_ORD_INDEX,
5679
   incrementing *BB_ORD_INDEX afterwards.  */
5680
static void
5681
sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5682
{
5683
  RGN_NR_BLOCKS (rgn) += 1;
5684
  RGN_DONT_CALC_DEPS (rgn) = 0;
5685
  RGN_HAS_REAL_EBB (rgn) = 0;
5686
  CONTAINING_RGN (bb->index) = rgn;
5687
  BLOCK_TO_BB (bb->index) = *bb_ord_index;
5688
  rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5689
  (*bb_ord_index)++;
5690
 
5691
  /* FIXME: it is true only when not scheduling ebbs.  */
5692
  RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5693
}
5694
 
5695
/* Functions to support pipelining of outer loops.  */
5696
 
5697
/* Creates a new empty region and returns its number.  */
5698
static int
5699
sel_create_new_region (void)
5700
{
5701
  int new_rgn_number = nr_regions;
5702
 
5703
  RGN_NR_BLOCKS (new_rgn_number) = 0;
5704
 
5705
  /* FIXME: This will work only when EBBs are not created.  */
5706
  if (new_rgn_number != 0)
5707
    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5708
      RGN_NR_BLOCKS (new_rgn_number - 1);
5709
  else
5710
    RGN_BLOCKS (new_rgn_number) = 0;
5711
 
5712
  /* Set the blocks of the next region so the other functions may
5713
     calculate the number of blocks in the region.  */
5714
  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5715
    RGN_NR_BLOCKS (new_rgn_number);
5716
 
5717
  nr_regions++;
5718
 
5719
  return new_rgn_number;
5720
}
5721
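
/* Editor's note (illustrative example, not from the original source):
   suppose two regions with 3 and 2 blocks already exist, so nr_regions == 2,
   RGN_BLOCKS (0) == 0, RGN_BLOCKS (1) == 3 and RGN_BLOCKS (2) == 5.  Then
   sel_create_new_region () returns 2 with RGN_NR_BLOCKS (2) == 0 and
   RGN_BLOCKS (3) == 5, and each subsequent sel_add_block_to_region call
   appends one block index at rgn_bb_table[5], rgn_bb_table[6], ... while
   bumping both RGN_NR_BLOCKS (2) and RGN_BLOCKS (3).  */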
 
5722
/* If X has a smaller topological sort number than Y, returns -1;
5723
   if greater, returns 1.  */
5724
static int
5725
bb_top_order_comparator (const void *x, const void *y)
5726
{
5727
  basic_block bb1 = *(const basic_block *) x;
5728
  basic_block bb2 = *(const basic_block *) y;
5729
 
5730
  gcc_assert (bb1 == bb2
5731
              || rev_top_order_index[bb1->index]
5732
                 != rev_top_order_index[bb2->index]);
5733
 
5734
  /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5735
     the bb with the greater number should go earlier.  */
5736
  if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5737
    return -1;
5738
  else
5739
    return 1;
5740
}
5741
 
5742
/* Create a region for LOOP and return its number.  If we don't want
5743
   to pipeline LOOP, return -1.  */
5744
static int
5745
make_region_from_loop (struct loop *loop)
5746
{
5747
  unsigned int i;
5748
  int new_rgn_number = -1;
5749
  struct loop *inner;
5750
 
5751
  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
5752
  int bb_ord_index = 0;
5753
  basic_block *loop_blocks;
5754
  basic_block preheader_block;
5755
 
5756
  if (loop->num_nodes
5757
      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5758
    return -1;
5759
 
5760
  /* Don't pipeline loops whose latch belongs to one of their inner loops.  */
5761
  for (inner = loop->inner; inner; inner = inner->inner)
5762
    if (flow_bb_inside_loop_p (inner, loop->latch))
5763
      return -1;
5764
 
5765
  loop->ninsns = num_loop_insns (loop);
5766
  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5767
    return -1;
5768
 
5769
  loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5770
 
5771
  for (i = 0; i < loop->num_nodes; i++)
5772
    if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5773
      {
5774
        free (loop_blocks);
5775
        return -1;
5776
      }
5777
 
5778
  preheader_block = loop_preheader_edge (loop)->src;
5779
  gcc_assert (preheader_block);
5780
  gcc_assert (loop_blocks[0] == loop->header);
5781
 
5782
  new_rgn_number = sel_create_new_region ();
5783
 
5784
  sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5785
  SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5786
 
5787
  for (i = 0; i < loop->num_nodes; i++)
5788
    {
5789
      /* Add only those blocks that haven't been scheduled in the inner loop.
5790
         The exception is basic blocks with bookkeeping code - they should
5791
         be added to the region (and they actually don't belong to the loop
5792
         body, but to the region containing that loop body).  */
5793
 
5794
      gcc_assert (new_rgn_number >= 0);
5795
 
5796
      if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5797
        {
5798
          sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5799
                                   new_rgn_number);
5800
          SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5801
        }
5802
    }
5803
 
5804
  free (loop_blocks);
5805
  MARK_LOOP_FOR_PIPELINING (loop);
5806
 
5807
  return new_rgn_number;
5808
}
5809
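
/* Editor's note (illustrative, not part of the original source): for a
   pipelinable loop the region built above is laid out as

     rgn_bb_table:  preheader (BLOCK_TO_BB == 0), then the loop body blocks
                    not already covered by inner-loop regions, in topological
                    order starting with the header.

   The fact that the preheader always gets ordinal 0 is what
   sel_is_loop_preheader_p relies on further below.  */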
 
5810
/* Create a new region from preheader blocks LOOP_BLOCKS.  */
5811
void
5812
make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5813
{
5814
  unsigned int i;
5815
  int new_rgn_number = -1;
5816
  basic_block bb;
5817
 
5818
  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
5819
  int bb_ord_index = 0;
5820
 
5821
  new_rgn_number = sel_create_new_region ();
5822
 
5823
  for (i = 0; VEC_iterate (basic_block, *loop_blocks, i, bb); i++)
5824
    {
5825
      gcc_assert (new_rgn_number >= 0);
5826
 
5827
      sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
5828
    }
5829
 
5830
  VEC_free (basic_block, heap, *loop_blocks);
5831
  gcc_assert (*loop_blocks == NULL);
5832
}
5833
 
5834
 
5835
/* Create region(s) from loop nest LOOP, such that inner loops will be
5836
   pipelined before outer loops.  Returns true when a region for LOOP
5837
   is created.  */
5838
static bool
5839
make_regions_from_loop_nest (struct loop *loop)
5840
{
5841
  struct loop *cur_loop;
5842
  int rgn_number;
5843
 
5844
  /* Traverse all inner nodes of the loop.  */
5845
  for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
5846
    if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
5847
      return false;
5848
 
5849
  /* At this moment all regular inner loops should have been pipelined.
5850
     Try to create a region from this loop.  */
5851
  rgn_number = make_region_from_loop (loop);
5852
 
5853
  if (rgn_number < 0)
5854
    return false;
5855
 
5856
  VEC_safe_push (loop_p, heap, loop_nests, loop);
5857
  return true;
5858
}
5859
 
5860
/* Initialize the data structures needed for pipelining.  */
5861
void
5862
sel_init_pipelining (void)
5863
{
5864
  /* Collect loop information to be used in outer loops pipelining.  */
5865
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
5866
                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
5867
                       | LOOPS_HAVE_RECORDED_EXITS
5868
                       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
5869
  current_loop_nest = NULL;
5870
 
5871
  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
5872
  sbitmap_zero (bbs_in_loop_rgns);
5873
 
5874
  recompute_rev_top_order ();
5875
}
5876
 
5877
/* Returns a struct loop for region RGN.  */
5878
loop_p
5879
get_loop_nest_for_rgn (unsigned int rgn)
5880
{
5881
  /* Regions created with extend_rgns don't have corresponding loop nests,
5882
     because they don't represent loops.  */
5883
  if (rgn < VEC_length (loop_p, loop_nests))
5884
    return VEC_index (loop_p, loop_nests, rgn);
5885
  else
5886
    return NULL;
5887
}
5888
 
5889
/* True when LOOP was included in the pipelining regions.  */
5890
bool
5891
considered_for_pipelining_p (struct loop *loop)
5892
{
5893
  if (loop_depth (loop) == 0)
5894
    return false;
5895
 
5896
  /* Now, the loop could be too large or irreducible.  Check whether its
5897
     region is in LOOP_NESTS.
5898
     We determine the region number of LOOP as the region number of its
5899
     latch.  We can't use header here, because this header could be
5900
     a just-removed preheader, which would give us the wrong region number.
5901
     Latch can't be used because it could be in the inner loop too.  */
5902
  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
5903
    {
5904
      int rgn = CONTAINING_RGN (loop->latch->index);
5905
 
5906
      gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
5907
      return true;
5908
    }
5909
 
5910
  return false;
5911
}
5912
 
5913
/* Makes regions from the rest of the blocks, after loops are chosen
5914
   for pipelining.  */
5915
static void
5916
make_regions_from_the_rest (void)
5917
{
5918
  int cur_rgn_blocks;
5919
  int *loop_hdr;
5920
  int i;
5921
 
5922
  basic_block bb;
5923
  edge e;
5924
  edge_iterator ei;
5925
  int *degree;
5926
 
5927
  /* Index in rgn_bb_table where to start allocating new regions.  */
5928
  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
5929
 
5930
  /* Make regions from all the remaining basic blocks - those that don't belong to
5931
     any loop or belong to irreducible loops.  Prepare the data structures
5932
     for extend_rgns.  */
5933
 
5934
  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
5935
     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
5936
     loop.  */
5937
  loop_hdr = XNEWVEC (int, last_basic_block);
5938
  degree = XCNEWVEC (int, last_basic_block);
5939
 
5940
 
5941
  /* For each basic block that belongs to some loop assign the number
5942
     of the innermost loop it belongs to.  */
5943
  for (i = 0; i < last_basic_block; i++)
5944
    loop_hdr[i] = -1;
5945
 
5946
  FOR_EACH_BB (bb)
5947
    {
5948
      if (bb->loop_father && bb->loop_father->num != 0
5949
          && !(bb->flags & BB_IRREDUCIBLE_LOOP))
5950
        loop_hdr[bb->index] = bb->loop_father->num;
5951
    }
5952
 
5953
  /* For each basic block degree is calculated as the number of incoming
5954
     edges that come from bbs that are not yet scheduled.
5955
     The basic blocks that are scheduled have degree value of zero.  */
5956
  FOR_EACH_BB (bb)
5957
    {
5958
      degree[bb->index] = 0;
5959
 
5960
      if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
5961
        {
5962
          FOR_EACH_EDGE (e, ei, bb->preds)
5963
            if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
5964
              degree[bb->index]++;
5965
        }
5966
      else
5967
        degree[bb->index] = -1;
5968
    }
5969
 
5970
  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
5971
 
5972
  /* Any block that did not end up in a region is placed into a region
5973
     by itself.  */
5974
  FOR_EACH_BB (bb)
5975
    if (degree[bb->index] >= 0)
5976
      {
5977
        rgn_bb_table[cur_rgn_blocks] = bb->index;
5978
        RGN_NR_BLOCKS (nr_regions) = 1;
5979
        RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
5980
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
5981
        RGN_HAS_REAL_EBB (nr_regions) = 0;
5982
        CONTAINING_RGN (bb->index) = nr_regions++;
5983
        BLOCK_TO_BB (bb->index) = 0;
5984
      }
5985
 
5986
  free (degree);
5987
  free (loop_hdr);
5988
}
5989
 
5990
/* Free data structures used in pipelining of loops.  */
5991
void
sel_finish_pipelining (void)
5992
{
5993
  loop_iterator li;
5994
  struct loop *loop;
5995
 
5996
  /* Release aux fields so we don't free them later by mistake.  */
5997
  FOR_EACH_LOOP (li, loop, 0)
5998
    loop->aux = NULL;
5999
 
6000
  loop_optimizer_finalize ();
6001
 
6002
  VEC_free (loop_p, heap, loop_nests);
6003
 
6004
  free (rev_top_order_index);
6005
  rev_top_order_index = NULL;
6006
}
6007
 
6008
/* This function replaces find_rgns when
6009
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
6010
void
6011
sel_find_rgns (void)
6012
{
6013
  sel_init_pipelining ();
6014
  extend_regions ();
6015
 
6016
  if (current_loops)
6017
    {
6018
      loop_p loop;
6019
      loop_iterator li;
6020
 
6021
      FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6022
                                ? LI_FROM_INNERMOST
6023
                                : LI_ONLY_INNERMOST))
6024
        make_regions_from_loop_nest (loop);
6025
    }
6026
 
6027
  /* Make regions from all the remaining basic blocks and schedule them.
6028
     These blocks include blocks that don't belong to any loop or belong
6029
     to irreducible loops.  */
6030
  make_regions_from_the_rest ();
6031
 
6032
  /* We don't need bbs_in_loop_rgns anymore.  */
6033
  sbitmap_free (bbs_in_loop_rgns);
6034
  bbs_in_loop_rgns = NULL;
6035
}
6036
 
6037
/* Adds the preheader blocks from the previous loop to the current region, taking
6038
   them from LOOP_PREHEADER_BLOCKS (current_loop_nest).
6039
   This function is only used with -fsel-sched-pipelining-outer-loops.  */
6040
void
6041
sel_add_loop_preheaders (void)
6042
{
6043
  int i;
6044
  basic_block bb;
6045
  VEC(basic_block, heap) *preheader_blocks
6046
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6047
 
6048
  for (i = 0;
6049
       VEC_iterate (basic_block, preheader_blocks, i, bb);
6050
       i++)
6051
    {
6052
      VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6053
      sel_add_bb (bb);
6054
    }
6055
 
6056
  VEC_free (basic_block, heap, preheader_blocks);
6057
}
6058
 
6059
/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6060
   Please note that the function should also work when pipelining_p is
6061
   false, because it is used when deciding whether we should or should
6062
   not reschedule pipelined code.  */
6063
bool
6064
sel_is_loop_preheader_p (basic_block bb)
6065
{
6066
  if (current_loop_nest)
6067
    {
6068
      struct loop *outer;
6069
 
6070
      if (preheader_removed)
6071
        return false;
6072
 
6073
      /* Preheader is the first block in the region.  */
6074
      if (BLOCK_TO_BB (bb->index) == 0)
6075
        return true;
6076
 
6077
      /* We used to find a preheader with the topological information.
6078
         Check that the above code is equivalent to what we did before.  */
6079
 
6080
      if (in_current_region_p (current_loop_nest->header))
6081
        gcc_assert (!(BLOCK_TO_BB (bb->index)
6082
                      < BLOCK_TO_BB (current_loop_nest->header->index)));
6083
 
6084
      /* Make sure BB is not the latch block of an outer loop that is
6085
         considered for pipelining.  */
6086
      for (outer = loop_outer (current_loop_nest);
6087
           outer;
6088
           outer = loop_outer (outer))
6089
        if (considered_for_pipelining_p (outer) && outer->latch == bb)
6090
          gcc_unreachable ();
6091
    }
6092
 
6093
  return false;
6094
}
6095
 
6096
/* Checks whether JUMP leads to basic block DEST_BB and no other blocks.  */
6097
bool
6098
jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
6099
{
6100
  basic_block jump_bb = BLOCK_FOR_INSN (jump);
6101
 
6102
  /* Not a jump, a jump with side effects, or a jump that can lead to several
6103
     basic blocks.  */
6104
  if (!onlyjump_p (jump)
6105
      || !any_uncondjump_p (jump))
6106
    return false;
6107
 
6108
  /* Several outgoing edges, abnormal edge or destination of jump is
6109
     not DEST_BB.  */
6110
  if (EDGE_COUNT (jump_bb->succs) != 1
6111
      || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
6112
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6113
    return false;
6114
 
6115
  /* None of the above, so the jump leads only to DEST_BB.  */
6116
  return true;
6117
}
6118
 
6119
/* Removes the loop preheader from the current region and saves it in
6120
   PREHEADER_BLOCKS of the father loop, so that these blocks will later be added
6121
   to the region that represents the outer loop.  */
6122
static void
6123
sel_remove_loop_preheader (void)
6124
{
6125
  int i, old_len;
6126
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6127
  basic_block bb;
6128
  bool all_empty_p = true;
6129
  VEC(basic_block, heap) *preheader_blocks
6130
    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6131
 
6132
  gcc_assert (current_loop_nest);
6133
  old_len = VEC_length (basic_block, preheader_blocks);
6134
 
6135
  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
6136
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6137
    {
6138
      bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6139
 
6140
      /* If the basic block belongs to the region, but doesn't belong to the
6141
         corresponding loop, then it should be a preheader.  */
6142
      if (sel_is_loop_preheader_p (bb))
6143
        {
6144
          VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6145
          if (BB_END (bb) != bb_note (bb))
6146
            all_empty_p = false;
6147
        }
6148
    }
6149
 
6150
  /* Remove these blocks only after iterating over the whole region.  */
6151
  for (i = VEC_length (basic_block, preheader_blocks) - 1;
6152
       i >= old_len;
6153
       i--)
6154
    {
6155
      bb = VEC_index (basic_block, preheader_blocks, i);
6156
      sel_remove_bb (bb, false);
6157
    }
6158
 
6159
  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6160
    {
6161
      if (!all_empty_p)
6162
        /* Immediately create new region from preheader.  */
6163
        make_region_from_loop_preheader (&preheader_blocks);
6164
      else
6165
        {
6166
          /* If all preheader blocks are empty, don't create a new empty region.
6167
             Instead, remove them completely.  */
6168
          for (i = 0; VEC_iterate (basic_block, preheader_blocks, i, bb); i++)
6169
            {
6170
              edge e;
6171
              edge_iterator ei;
6172
              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6173
 
6174
              /* Redirect all incoming edges to next basic block.  */
6175
              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6176
                {
6177
                  if (! (e->flags & EDGE_FALLTHRU))
6178
                    redirect_edge_and_branch (e, bb->next_bb);
6179
                  else
6180
                    redirect_edge_succ (e, bb->next_bb);
6181
                }
6182
              gcc_assert (BB_NOTE_LIST (bb) == NULL);
6183
              delete_and_free_basic_block (bb);
6184
 
6185
              /* Check whether, after deleting the preheader, there is an unconditional
6186
                 jump in PREV_BB that leads to the next basic block NEXT_BB.
6187
                 If it is so - delete this jump and clear data sets of its
6188
                 basic block if it becomes empty.  */
6189
              if (next_bb->prev_bb == prev_bb
6190
                  && prev_bb != ENTRY_BLOCK_PTR
6191
                  && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
6192
                {
6193
                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6194
                  if (BB_END (prev_bb) == bb_note (prev_bb))
6195
                    free_data_sets (prev_bb);
6196
                }
6197
            }
6198
        }
6199
      VEC_free (basic_block, heap, preheader_blocks);
6200
    }
6201
  else
6202
    /* Store preheader within the father's loop structure.  */
6203
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6204
                               preheader_blocks);
6205
}
6206
#endif
