openrisc/trunk/gnu-dev/or1k-gcc/gcc/sched-deps.c - blame listing for rev 801 (OpenCores Subversion repository)
1 684 jeremybenn
/* Instruction scheduling pass.  This file computes dependencies between
2
   instructions.
3
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5
   2011, 2012
6
   Free Software Foundation, Inc.
7
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
8
   and currently maintained by, Jim Wilson (wilson@cygnus.com)
9
 
10
This file is part of GCC.
11
 
12
GCC is free software; you can redistribute it and/or modify it under
13
the terms of the GNU General Public License as published by the Free
14
Software Foundation; either version 3, or (at your option) any later
15
version.
16
 
17
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
18
WARRANTY; without even the implied warranty of MERCHANTABILITY or
19
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
20
for more details.
21
 
22
You should have received a copy of the GNU General Public License
23
along with GCC; see the file COPYING3.  If not see
24
<http://www.gnu.org/licenses/>.  */
25
 
26
#include "config.h"
27
#include "system.h"
28
#include "coretypes.h"
29
#include "tm.h"
30
#include "diagnostic-core.h"
31
#include "rtl.h"
32
#include "tm_p.h"
33
#include "hard-reg-set.h"
34
#include "regs.h"
35
#include "function.h"
36
#include "flags.h"
37
#include "insn-config.h"
38
#include "insn-attr.h"
39
#include "except.h"
40
#include "recog.h"
41
#include "sched-int.h"
42
#include "params.h"
43
#include "cselib.h"
44
#include "ira.h"
45
#include "target.h"
46
 
47
#ifdef INSN_SCHEDULING
48
 
49
#ifdef ENABLE_CHECKING
50
#define CHECK (true)
51
#else
52
#define CHECK (false)
53
#endif
54
 
55
/* Holds current parameters for the dependency analyzer.  */
56
struct sched_deps_info_def *sched_deps_info;
57
 
58
/* The data is specific to the Haifa scheduler.  */
59
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
60
 
61
/* Return the major type present in the DS.  */
62
enum reg_note
63
ds_to_dk (ds_t ds)
64
{
65
  if (ds & DEP_TRUE)
66
    return REG_DEP_TRUE;
67
 
68
  if (ds & DEP_OUTPUT)
69
    return REG_DEP_OUTPUT;
70
 
71
  if (ds & DEP_CONTROL)
72
    return REG_DEP_CONTROL;
73
 
74
  gcc_assert (ds & DEP_ANTI);
75
 
76
  return REG_DEP_ANTI;
77
}
78
 
79
/* Return equivalent dep_status.  */
80
ds_t
81
dk_to_ds (enum reg_note dk)
82
{
83
  switch (dk)
84
    {
85
    case REG_DEP_TRUE:
86
      return DEP_TRUE;
87
 
88
    case REG_DEP_OUTPUT:
89
      return DEP_OUTPUT;
90
 
91
    case REG_DEP_CONTROL:
92
      return DEP_CONTROL;
93
 
94
    default:
95
      gcc_assert (dk == REG_DEP_ANTI);
96
      return DEP_ANTI;
97
    }
98
}
99
 
100
/* Functions to operate with dependence information container - dep_t.  */
101
 
102
/* Init DEP with the arguments.  */
103
void
104
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
105
{
106
  DEP_PRO (dep) = pro;
107
  DEP_CON (dep) = con;
108
  DEP_TYPE (dep) = type;
109
  DEP_STATUS (dep) = ds;
110
  DEP_COST (dep) = UNKNOWN_DEP_COST;
111
}
112
 
113
/* Init DEP with the arguments.
114
   While most of the scheduler (including targets) only needs the major type
115
   of the dependency, it is convenient to hide full dep_status from them.  */
116
void
117
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
118
{
119
  ds_t ds;
120
 
121
  if ((current_sched_info->flags & USE_DEPS_LIST))
122
    ds = dk_to_ds (kind);
123
  else
124
    ds = 0;
125
 
126
  init_dep_1 (dep, pro, con, kind, ds);
127
}
128
 
129
/* Make a copy of FROM in TO.  */
130
static void
131
copy_dep (dep_t to, dep_t from)
132
{
133
  memcpy (to, from, sizeof (*to));
134
}
135
 
136
static void dump_ds (FILE *, ds_t);
137
 
138
/* Define flags for dump_dep ().  */
139
 
140
/* Dump producer of the dependence.  */
141
#define DUMP_DEP_PRO (2)
142
 
143
/* Dump consumer of the dependence.  */
144
#define DUMP_DEP_CON (4)
145
 
146
/* Dump type of the dependence.  */
147
#define DUMP_DEP_TYPE (8)
148
 
149
/* Dump status of the dependence.  */
150
#define DUMP_DEP_STATUS (16)
151
 
152
/* Dump all information about the dependence.  */
153
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE       \
154
                      |DUMP_DEP_STATUS)
155
 
156
/* Dump DEP to DUMP.
157
   FLAGS is a bit mask specifying what information about DEP needs
158
   to be printed.
159
   If FLAGS has the very first bit set, then dump all information about DEP
160
   and propagate this bit into the callee dump functions.  */
161
static void
162
dump_dep (FILE *dump, dep_t dep, int flags)
163
{
164
  if (flags & 1)
165
    flags |= DUMP_DEP_ALL;
166
 
167
  fprintf (dump, "<");
168
 
169
  if (flags & DUMP_DEP_PRO)
170
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
171
 
172
  if (flags & DUMP_DEP_CON)
173
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
174
 
175
  if (flags & DUMP_DEP_TYPE)
176
    {
177
      char t;
178
      enum reg_note type = DEP_TYPE (dep);
179
 
180
      switch (type)
181
        {
182
        case REG_DEP_TRUE:
183
          t = 't';
184
          break;
185
 
186
        case REG_DEP_OUTPUT:
187
          t = 'o';
188
          break;
189
 
190
        case REG_DEP_CONTROL:
191
          t = 'c';
192
          break;
193
 
194
        case REG_DEP_ANTI:
195
          t = 'a';
196
          break;
197
 
198
        default:
199
          gcc_unreachable ();
200
          break;
201
        }
202
 
203
      fprintf (dump, "%c; ", t);
204
    }
205
 
206
  if (flags & DUMP_DEP_STATUS)
207
    {
208
      if (current_sched_info->flags & USE_DEPS_LIST)
209
        dump_ds (dump, DEP_STATUS (dep));
210
    }
211
 
212
  fprintf (dump, ">");
213
}
214
 
215
/* Default flags for dump_dep ().  */
216
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
217
 
218
/* Dump all fields of DEP to STDERR.  */
219
void
220
sd_debug_dep (dep_t dep)
221
{
222
  dump_dep (stderr, dep, 1);
223
  fprintf (stderr, "\n");
224
}
225
 
226
/* Determine whether DEP is a dependency link of a non-debug insn on a
227
   debug insn.  */
228
 
229
static inline bool
230
depl_on_debug_p (dep_link_t dep)
231
{
232
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
233
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
234
}
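/* Links for which this predicate holds are not counted in
   DEPS_LIST_N_LINKS (see add_to_deps_list and remove_from_deps_list below),
   so list sizes ignore dependencies of non-debug insns on debug insns.  */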
235
 
236
/* Functions to operate with a single link from the dependencies lists -
237
   dep_link_t.  */
238
 
239
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
240
   PREV_NEXT_P.  */
241
static void
242
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
243
{
244
  dep_link_t next = *prev_nextp;
245
 
246
  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
247
              && DEP_LINK_NEXT (l) == NULL);
248
 
249
  /* Init node being inserted.  */
250
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
251
  DEP_LINK_NEXT (l) = next;
252
 
253
  /* Fix next node.  */
254
  if (next != NULL)
255
    {
256
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
257
 
258
      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
259
    }
260
 
261
  /* Fix prev node.  */
262
  *prev_nextp = l;
263
}
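/* Note on the representation: DEP_LINK_PREV_NEXTP stores the address of the
   previous link's NEXT field (or of the list head, DEPS_LIST_FIRST, for the
   first link), so attach_dep_link and detach_dep_link never need to
   special-case the head of a list.  */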
264
 
265
/* Add dep_link LINK to deps_list L.  */
266
static void
267
add_to_deps_list (dep_link_t link, deps_list_t l)
268
{
269
  attach_dep_link (link, &DEPS_LIST_FIRST (l));
270
 
271
  /* Don't count debug deps.  */
272
  if (!depl_on_debug_p (link))
273
    ++DEPS_LIST_N_LINKS (l);
274
}
275
 
276
/* Detach dep_link L from the list.  */
277
static void
278
detach_dep_link (dep_link_t l)
279
{
280
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
281
  dep_link_t next = DEP_LINK_NEXT (l);
282
 
283
  *prev_nextp = next;
284
 
285
  if (next != NULL)
286
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;
287
 
288
  DEP_LINK_PREV_NEXTP (l) = NULL;
289
  DEP_LINK_NEXT (l) = NULL;
290
}
291
 
292
/* Remove link LINK from list LIST.  */
293
static void
294
remove_from_deps_list (dep_link_t link, deps_list_t list)
295
{
296
  detach_dep_link (link);
297
 
298
  /* Don't count debug deps.  */
299
  if (!depl_on_debug_p (link))
300
    --DEPS_LIST_N_LINKS (list);
301
}
302
 
303
/* Move link LINK from list FROM to list TO.  */
304
static void
305
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
306
{
307
  remove_from_deps_list (link, from);
308
  add_to_deps_list (link, to);
309
}
310
 
311
/* Return true of LINK is not attached to any list.  */
312
static bool
313
dep_link_is_detached_p (dep_link_t link)
314
{
315
  return DEP_LINK_PREV_NEXTP (link) == NULL;
316
}
317
 
318
/* Pool to hold all dependency nodes (dep_node_t).  */
319
static alloc_pool dn_pool;
320
 
321
/* Number of dep_nodes out there.  */
322
static int dn_pool_diff = 0;
323
 
324
/* Create a dep_node.  */
325
static dep_node_t
326
create_dep_node (void)
327
{
328
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
329
  dep_link_t back = DEP_NODE_BACK (n);
330
  dep_link_t forw = DEP_NODE_FORW (n);
331
 
332
  DEP_LINK_NODE (back) = n;
333
  DEP_LINK_NEXT (back) = NULL;
334
  DEP_LINK_PREV_NEXTP (back) = NULL;
335
 
336
  DEP_LINK_NODE (forw) = n;
337
  DEP_LINK_NEXT (forw) = NULL;
338
  DEP_LINK_PREV_NEXTP (forw) = NULL;
339
 
340
  ++dn_pool_diff;
341
 
342
  return n;
343
}
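/* dn_pool_diff (and dl_pool_diff below) count live nodes and lists so that
   deps_pools_are_empty_p can verify that everything has been returned to
   the pools once a region has been scheduled.  */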
344
 
345
/* Delete dep_node N.  N must not be connected to any deps_list.  */
346
static void
347
delete_dep_node (dep_node_t n)
348
{
349
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
350
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));
351
 
352
  --dn_pool_diff;
353
 
354
  pool_free (dn_pool, n);
355
}
356
 
357
/* Pool to hold dependencies lists (deps_list_t).  */
358
static alloc_pool dl_pool;
359
 
360
/* Number of deps_lists out there.  */
361
static int dl_pool_diff = 0;
362
 
363
/* Functions to operate with dependences lists - deps_list_t.  */
364
 
365
/* Return true if list L is empty.  */
366
static bool
367
deps_list_empty_p (deps_list_t l)
368
{
369
  return DEPS_LIST_N_LINKS (l) == 0;
370
}
371
 
372
/* Create a new deps_list.  */
373
static deps_list_t
374
create_deps_list (void)
375
{
376
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
377
 
378
  DEPS_LIST_FIRST (l) = NULL;
379
  DEPS_LIST_N_LINKS (l) = 0;
380
 
381
  ++dl_pool_diff;
382
  return l;
383
}
384
 
385
/* Free deps_list L.  */
386
static void
387
free_deps_list (deps_list_t l)
388
{
389
  gcc_assert (deps_list_empty_p (l));
390
 
391
  --dl_pool_diff;
392
 
393
  pool_free (dl_pool, l);
394
}
395
 
396
/* Return true if there are no dep_nodes and deps_lists out there.
397
   After the region is scheduled all the dependency nodes and lists
398
   should [generally] be returned to the pool.  */
399
bool
400
deps_pools_are_empty_p (void)
401
{
402
  return dn_pool_diff == 0 && dl_pool_diff == 0;
403
}
404
 
405
/* Remove all elements from L.  */
406
static void
407
clear_deps_list (deps_list_t l)
408
{
409
  do
410
    {
411
      dep_link_t link = DEPS_LIST_FIRST (l);
412
 
413
      if (link == NULL)
414
        break;
415
 
416
      remove_from_deps_list (link, l);
417
    }
418
  while (1);
419
}
420
 
421
/* Decide whether a dependency should be treated as a hard or a speculative
422
   dependency.  */
423
static bool
424
dep_spec_p (dep_t dep)
425
{
426
  if (current_sched_info->flags & DO_SPECULATION)
427
    {
428
      if (DEP_STATUS (dep) & SPECULATIVE)
429
        return true;
430
    }
431
  if (current_sched_info->flags & DO_PREDICATION)
432
    {
433
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
434
        return true;
435
    }
436
  return false;
437
}
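/* Dependencies for which dep_spec_p holds live on the consumer's
   INSN_SPEC_BACK_DEPS list instead of INSN_HARD_BACK_DEPS (see
   get_back_and_forw_lists below); control dependencies count as speculative
   only when the scheduler is allowed to predicate insns (DO_PREDICATION).  */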
438
 
439
static regset reg_pending_sets;
440
static regset reg_pending_clobbers;
441
static regset reg_pending_uses;
442
static regset reg_pending_control_uses;
443
static enum reg_pending_barrier_mode reg_pending_barrier;
444
 
445
/* Hard registers implicitly clobbered or used (or may be implicitly
446
   clobbered or used) by the currently analyzed insn.  For example,
447
   an insn's constraint may allow only one register class.  Even if there is
448
   currently no hard register in the insn, the particular hard
449
   register will appear in the insn after the reload pass because the
450
   constraint requires it.  */
451
static HARD_REG_SET implicit_reg_pending_clobbers;
452
static HARD_REG_SET implicit_reg_pending_uses;
453
 
454
/* To speed up the test for duplicate dependency links we keep a
455
   record of dependencies created by add_dependence when the average
456
   number of instructions in a basic block is very large.
457
 
458
   Studies have shown that there are typically around 5 instructions between
459
   branches for typical C code.  So we can make a guess that the average
460
   basic block is approximately 5 instructions long; we will choose 100X
461
   the average size as a very large basic block.
462
 
463
   Each insn has associated bitmaps for its dependencies.  Each bitmap
464
   has enough entries to represent a dependency on any other insn in
465
   the insn chain.  If the bitmap for the true-dependency cache is
466
   allocated, the remaining caches are allocated as well.  */
467
static bitmap_head *true_dependency_cache = NULL;
468
static bitmap_head *output_dependency_cache = NULL;
469
static bitmap_head *anti_dependency_cache = NULL;
470
static bitmap_head *control_dependency_cache = NULL;
471
static bitmap_head *spec_dependency_cache = NULL;
472
static int cache_size;
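/* The caches are indexed by luid: bit INSN_LUID (producer) is set in
   true_dependency_cache[INSN_LUID (consumer)] when the consumer has a true
   dependence on the producer, and likewise for the other caches (see
   ask_dependency_caches and set_dependency_caches below).  */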
473
 
474
static int deps_may_trap_p (const_rtx);
475
static void add_dependence_1 (rtx, rtx, enum reg_note);
476
static void add_dependence_list (rtx, rtx, int, enum reg_note);
477
static void add_dependence_list_and_free (struct deps_desc *, rtx,
478
                                          rtx *, int, enum reg_note);
479
static void delete_all_dependences (rtx);
480
static void fixup_sched_groups (rtx);
481
 
482
static void flush_pending_lists (struct deps_desc *, rtx, int, int);
483
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
484
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
485
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
486
 
487
static bool sched_has_condition_p (const_rtx);
488
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
489
 
490
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
491
                                                          rtx, rtx);
492
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
493
 
494
#ifdef ENABLE_CHECKING
495
static void check_dep (dep_t, bool);
496
#endif
497
 
498
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
499
 
500
static int
501
deps_may_trap_p (const_rtx mem)
502
{
503
  const_rtx addr = XEXP (mem, 0);
504
 
505
  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
506
    {
507
      const_rtx t = get_reg_known_value (REGNO (addr));
508
      if (t)
509
        addr = t;
510
    }
511
  return rtx_addr_can_trap_p (addr);
512
}
513
 
514
 
515
/* Find the condition under which INSN is executed.  If REV is not NULL,
516
   it is set to TRUE when the returned comparison should be reversed
517
   to get the actual condition.  */
518
static rtx
519
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
520
{
521
  rtx pat = PATTERN (insn);
522
  rtx src;
523
 
524
  if (rev)
525
    *rev = false;
526
 
527
  if (GET_CODE (pat) == COND_EXEC)
528
    return COND_EXEC_TEST (pat);
529
 
530
  if (!any_condjump_p (insn) || !onlyjump_p (insn))
531
    return 0;
532
 
533
  src = SET_SRC (pc_set (insn));
534
 
535
  if (XEXP (src, 2) == pc_rtx)
536
    return XEXP (src, 0);
537
  else if (XEXP (src, 1) == pc_rtx)
538
    {
539
      rtx cond = XEXP (src, 0);
540
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
541
 
542
      if (revcode == UNKNOWN)
543
        return 0;
544
 
545
      if (rev)
546
        *rev = true;
547
      return cond;
548
    }
549
 
550
  return 0;
551
}
552
 
553
/* Return the condition under which INSN does not execute (i.e.  the
554
   not-taken condition for a conditional branch), or NULL if we cannot
555
   find such a condition.  The caller should make a copy of the condition
556
   before using it.  */
557
rtx
558
sched_get_reverse_condition_uncached (const_rtx insn)
559
{
560
  bool rev;
561
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
562
  if (cond == NULL_RTX)
563
    return cond;
564
  if (!rev)
565
    {
566
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
567
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
568
                             XEXP (cond, 0),
569
                             XEXP (cond, 1));
570
    }
571
  return cond;
572
}
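/* Illustrative example: for a conditional jump taken when
   (ne (reg 1) (const_int 0)), the value returned here is the not-taken test
   (eq (reg 1) (const_int 0)).  */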
573
 
574
/* Caching variant of sched_get_condition_with_rev_uncached.
575
   We only do actual work the first time we come here for an insn; the
576
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
577
static rtx
578
sched_get_condition_with_rev (const_rtx insn, bool *rev)
579
{
580
  bool tmp;
581
 
582
  if (INSN_LUID (insn) == 0)
583
    return sched_get_condition_with_rev_uncached (insn, rev);
584
 
585
  if (INSN_CACHED_COND (insn) == const_true_rtx)
586
    return NULL_RTX;
587
 
588
  if (INSN_CACHED_COND (insn) != NULL_RTX)
589
    {
590
      if (rev)
591
        *rev = INSN_REVERSE_COND (insn);
592
      return INSN_CACHED_COND (insn);
593
    }
594
 
595
  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
596
  INSN_REVERSE_COND (insn) = tmp;
597
 
598
  if (INSN_CACHED_COND (insn) == NULL_RTX)
599
    {
600
      INSN_CACHED_COND (insn) = const_true_rtx;
601
      return NULL_RTX;
602
    }
603
 
604
  if (rev)
605
    *rev = INSN_REVERSE_COND (insn);
606
  return INSN_CACHED_COND (insn);
607
}
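/* Cache protocol: INSN_CACHED_COND is NULL_RTX while the condition has not
   been computed, const_true_rtx once the insn is known to have no usable
   condition, and the actual condition otherwise; INSN_REVERSE_COND is only
   meaningful in the last case.  */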
608
 
609
/* True when we can find a condition under which INSN is executed.  */
610
static bool
611
sched_has_condition_p (const_rtx insn)
612
{
613
  return !! sched_get_condition_with_rev (insn, NULL);
614
}
615
 
616
 
617
 
618
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
619
static int
620
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
621
{
622
  if (COMPARISON_P (cond1)
623
      && COMPARISON_P (cond2)
624
      && GET_CODE (cond1) ==
625
          (rev1==rev2
626
          ? reversed_comparison_code (cond2, NULL)
627
          : GET_CODE (cond2))
628
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
629
      && XEXP (cond1, 1) == XEXP (cond2, 1))
630
    return 1;
631
  return 0;
632
}
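/* Example: with REV1 == REV2, (eq (reg 1) (const_int 0)) and
   (ne (reg 1) (const_int 0)) are recognized as mutually exclusive, since EQ
   is the reversed comparison code of NE and both pairs of operands match.  */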
633
 
634
/* Return true if insn1 and insn2 can never depend on one another because
635
   the conditions under which they are executed are mutually exclusive.  */
636
bool
637
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
638
{
639
  rtx cond1, cond2;
640
  bool rev1 = false, rev2 = false;
641
 
642
  /* df doesn't handle conditional lifetimes entirely correctly;
643
     calls mess up the conditional lifetimes.  */
644
  if (!CALL_P (insn1) && !CALL_P (insn2))
645
    {
646
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
647
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
648
      if (cond1 && cond2
649
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
650
          /* Make sure first instruction doesn't affect condition of second
651
             instruction if switched.  */
652
          && !modified_in_p (cond1, insn2)
653
          /* Make sure second instruction doesn't affect condition of first
654
             instruction if switched.  */
655
          && !modified_in_p (cond2, insn1))
656
        return true;
657
    }
658
  return false;
659
}
660
 
661
 
662
/* Return true if INSN can potentially be speculated with type DS.  */
663
bool
664
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
665
{
666
  if (HAS_INTERNAL_DEP (insn))
667
    return false;
668
 
669
  if (!NONJUMP_INSN_P (insn))
670
    return false;
671
 
672
  if (SCHED_GROUP_P (insn))
673
    return false;
674
 
675
  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
676
    return false;
677
 
678
  if (side_effects_p (PATTERN (insn)))
679
    return false;
680
 
681
  if (ds & BE_IN_SPEC)
682
    /* The following instructions, which depend on a speculatively scheduled
683
       instruction, cannot be speculatively scheduled along with it.  */
684
    {
685
      if (may_trap_or_fault_p (PATTERN (insn)))
686
        /* If instruction might fault, it cannot be speculatively scheduled.
687
           For control speculation it's obvious why and for data speculation
688
           it's because the insn might get wrong input if speculation
689
           wasn't successful.  */
690
        return false;
691
 
692
      if ((ds & BE_IN_DATA)
693
          && sched_has_condition_p (insn))
694
        /* If this is a predicated instruction, then it cannot be
695
           speculatively scheduled.  See PR35659.  */
696
        return false;
697
    }
698
 
699
  return true;
700
}
701
 
702
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
703
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
704
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
705
   This function is used to switch sd_iterator to the next list.
706
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
707
void
708
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
709
              deps_list_t *list_ptr, bool *resolved_p_ptr)
710
{
711
  sd_list_types_def types = *types_ptr;
712
 
713
  if (types & SD_LIST_HARD_BACK)
714
    {
715
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
716
      *resolved_p_ptr = false;
717
      *types_ptr = types & ~SD_LIST_HARD_BACK;
718
    }
719
  else if (types & SD_LIST_SPEC_BACK)
720
    {
721
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
722
      *resolved_p_ptr = false;
723
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
724
    }
725
  else if (types & SD_LIST_FORW)
726
    {
727
      *list_ptr = INSN_FORW_DEPS (insn);
728
      *resolved_p_ptr = false;
729
      *types_ptr = types & ~SD_LIST_FORW;
730
    }
731
  else if (types & SD_LIST_RES_BACK)
732
    {
733
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
734
      *resolved_p_ptr = true;
735
      *types_ptr = types & ~SD_LIST_RES_BACK;
736
    }
737
  else if (types & SD_LIST_RES_FORW)
738
    {
739
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
740
      *resolved_p_ptr = true;
741
      *types_ptr = types & ~SD_LIST_RES_FORW;
742
    }
743
  else
744
    {
745
      *list_ptr = NULL;
746
      *resolved_p_ptr = false;
747
      *types_ptr = SD_LIST_NONE;
748
    }
749
}
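/* Callers normally do not use this directly; they walk dependency lists
   through the iterator macros from sched-int.h, as is done throughout this
   file, e.g.:

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_PRO (dep) == elem)
         ...

   where SD_IT is an sd_iterator_def and DEP a dep_t.  */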
750
 
751
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
752
int
753
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
754
{
755
  int size = 0;
756
 
757
  while (list_types != SD_LIST_NONE)
758
    {
759
      deps_list_t list;
760
      bool resolved_p;
761
 
762
      sd_next_list (insn, &list_types, &list, &resolved_p);
763
      if (list)
764
        size += DEPS_LIST_N_LINKS (list);
765
    }
766
 
767
  return size;
768
}
769
 
770
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
771
 
772
bool
773
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
774
{
775
  while (list_types != SD_LIST_NONE)
776
    {
777
      deps_list_t list;
778
      bool resolved_p;
779
 
780
      sd_next_list (insn, &list_types, &list, &resolved_p);
781
      if (!deps_list_empty_p (list))
782
        return false;
783
    }
784
 
785
  return true;
786
}
787
 
788
/* Initialize data for INSN.  */
789
void
790
sd_init_insn (rtx insn)
791
{
792
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
793
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
794
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
795
  INSN_FORW_DEPS (insn) = create_deps_list ();
796
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
797
 
798
  /* ??? It would be nice to allocate dependency caches here.  */
799
}
800
 
801
/* Free data for INSN.  */
802
void
803
sd_finish_insn (rtx insn)
804
{
805
  /* ??? It would be nice to deallocate dependency caches here.  */
806
 
807
  free_deps_list (INSN_HARD_BACK_DEPS (insn));
808
  INSN_HARD_BACK_DEPS (insn) = NULL;
809
 
810
  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
811
  INSN_SPEC_BACK_DEPS (insn) = NULL;
812
 
813
  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
814
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;
815
 
816
  free_deps_list (INSN_FORW_DEPS (insn));
817
  INSN_FORW_DEPS (insn) = NULL;
818
 
819
  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
820
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
821
}
822
 
823
/* Find a dependency between producer PRO and consumer CON.
824
   Search through resolved dependency lists if RESOLVED_P is true.
825
   If no such dependency is found return NULL,
826
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
827
   with an iterator pointing to it.  */
828
static dep_t
829
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
830
                              sd_iterator_def *sd_it_ptr)
831
{
832
  sd_list_types_def pro_list_type;
833
  sd_list_types_def con_list_type;
834
  sd_iterator_def sd_it;
835
  dep_t dep;
836
  bool found_p = false;
837
 
838
  if (resolved_p)
839
    {
840
      pro_list_type = SD_LIST_RES_FORW;
841
      con_list_type = SD_LIST_RES_BACK;
842
    }
843
  else
844
    {
845
      pro_list_type = SD_LIST_FORW;
846
      con_list_type = SD_LIST_BACK;
847
    }
848
 
849
  /* Walk through either back list of INSN or forw list of ELEM
850
     depending on which one is shorter.  */
851
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
852
    {
853
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
854
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
855
        if (DEP_PRO (dep) == pro)
856
          {
857
            found_p = true;
858
            break;
859
          }
860
    }
861
  else
862
    {
863
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
864
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
865
        if (DEP_CON (dep) == con)
866
          {
867
            found_p = true;
868
            break;
869
          }
870
    }
871
 
872
  if (found_p)
873
    {
874
      if (sd_it_ptr != NULL)
875
        *sd_it_ptr = sd_it;
876
 
877
      return dep;
878
    }
879
 
880
  return NULL;
881
}
882
 
883
/* Find a dependency between producer PRO and consumer CON.
884
   Use the dependency cache [if available] to check if it is present at all.
885
   Search through resolved dependency lists if RESOLVED_P is true.
886
   Return the dependency, or NULL if none is found.  */
887
dep_t
888
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
889
{
890
  if (true_dependency_cache != NULL)
891
    /* Avoiding the list walk below can cut compile times dramatically
892
       for some code.  */
893
    {
894
      int elem_luid = INSN_LUID (pro);
895
      int insn_luid = INSN_LUID (con);
896
 
897
      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
898
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
899
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
900
          && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
901
        return NULL;
902
    }
903
 
904
  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
905
}
906
 
907
/* Add or update a dependence described by DEP.
908
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
909
   data speculation.
910
 
911
   The function returns a value indicating if an old entry has been changed
912
   or a new entry has been added to insn's backward deps.
913
 
914
   This function merely checks if producer and consumer are the same insn
915
   and doesn't create a dep in this case.  Actual manipulation of
916
   dependence data structures is performed in add_or_update_dep_1.  */
917
static enum DEPS_ADJUST_RESULT
918
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
919
{
920
  rtx elem = DEP_PRO (dep);
921
  rtx insn = DEP_CON (dep);
922
 
923
  gcc_assert (INSN_P (insn) && INSN_P (elem));
924
 
925
  /* Don't depend an insn on itself.  */
926
  if (insn == elem)
927
    {
928
      if (sched_deps_info->generate_spec_deps)
929
        /* INSN has an internal dependence, which we can't overcome.  */
930
        HAS_INTERNAL_DEP (insn) = 1;
931
 
932
      return DEP_NODEP;
933
    }
934
 
935
  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
936
}
937
 
938
/* Ask dependency caches what needs to be done for dependence DEP.
939
   Return DEP_CREATED if new dependence should be created and there is no
940
   need to try to find one searching the dependencies lists.
941
   Return DEP_PRESENT if there already is a dependence described by DEP and
942
   hence nothing is to be done.
943
   Return DEP_CHANGED if there already is a dependence, but it should be
944
   updated to incorporate additional information from DEP.  */
945
static enum DEPS_ADJUST_RESULT
946
ask_dependency_caches (dep_t dep)
947
{
948
  int elem_luid = INSN_LUID (DEP_PRO (dep));
949
  int insn_luid = INSN_LUID (DEP_CON (dep));
950
 
951
  gcc_assert (true_dependency_cache != NULL
952
              && output_dependency_cache != NULL
953
              && anti_dependency_cache != NULL
954
              && control_dependency_cache != NULL);
955
 
956
  if (!(current_sched_info->flags & USE_DEPS_LIST))
957
    {
958
      enum reg_note present_dep_type;
959
 
960
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
961
        present_dep_type = REG_DEP_TRUE;
962
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
963
        present_dep_type = REG_DEP_OUTPUT;
964
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
965
        present_dep_type = REG_DEP_ANTI;
966
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
967
        present_dep_type = REG_DEP_CONTROL;
968
      else
969
        /* There is no existing dep so it should be created.  */
970
        return DEP_CREATED;
971
 
972
      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
973
        /* DEP does not add anything to the existing dependence.  */
974
        return DEP_PRESENT;
975
    }
976
  else
977
    {
978
      ds_t present_dep_types = 0;
979
 
980
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
981
        present_dep_types |= DEP_TRUE;
982
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
983
        present_dep_types |= DEP_OUTPUT;
984
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
985
        present_dep_types |= DEP_ANTI;
986
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
987
        present_dep_types |= DEP_CONTROL;
988
 
989
      if (present_dep_types == 0)
990
        /* There is no existing dep so it should be created.  */
991
        return DEP_CREATED;
992
 
993
      if (!(current_sched_info->flags & DO_SPECULATION)
994
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
995
        {
996
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
997
              == present_dep_types)
998
            /* DEP does not add anything to the existing dependence.  */
999
            return DEP_PRESENT;
1000
        }
1001
      else
1002
        {
1003
          /* Only true dependencies can be data speculative and
1004
             only anti dependencies can be control speculative.  */
1005
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1006
                      == present_dep_types);
1007
 
1008
          /* if (DEP is SPECULATIVE) then
1009
             ..we should update DEP_STATUS
1010
             else
1011
             ..we should reset existing dep to non-speculative.  */
1012
        }
1013
    }
1014
 
1015
  return DEP_CHANGED;
1016
}
1017
 
1018
/* Set dependency caches according to DEP.  */
1019
static void
1020
set_dependency_caches (dep_t dep)
1021
{
1022
  int elem_luid = INSN_LUID (DEP_PRO (dep));
1023
  int insn_luid = INSN_LUID (DEP_CON (dep));
1024
 
1025
  if (!(current_sched_info->flags & USE_DEPS_LIST))
1026
    {
1027
      switch (DEP_TYPE (dep))
1028
        {
1029
        case REG_DEP_TRUE:
1030
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1031
          break;
1032
 
1033
        case REG_DEP_OUTPUT:
1034
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1035
          break;
1036
 
1037
        case REG_DEP_ANTI:
1038
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1039
          break;
1040
 
1041
        case REG_DEP_CONTROL:
1042
          bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1043
          break;
1044
 
1045
        default:
1046
          gcc_unreachable ();
1047
        }
1048
    }
1049
  else
1050
    {
1051
      ds_t ds = DEP_STATUS (dep);
1052
 
1053
      if (ds & DEP_TRUE)
1054
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1055
      if (ds & DEP_OUTPUT)
1056
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1057
      if (ds & DEP_ANTI)
1058
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1059
      if (ds & DEP_CONTROL)
1060
        bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1061
 
1062
      if (ds & SPECULATIVE)
1063
        {
1064
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
1065
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1066
        }
1067
    }
1068
}
1069
 
1070
/* The type of dependence DEP has changed from OLD_TYPE.  Update the dependency
1071
   caches accordingly.  */
1072
static void
1073
update_dependency_caches (dep_t dep, enum reg_note old_type)
1074
{
1075
  int elem_luid = INSN_LUID (DEP_PRO (dep));
1076
  int insn_luid = INSN_LUID (DEP_CON (dep));
1077
 
1078
  /* Clear the corresponding cache entry because the type of the link
1079
     may have changed.  Keep the entries if USE_DEPS_LIST is set.  */
1080
  if (!(current_sched_info->flags & USE_DEPS_LIST))
1081
    {
1082
      switch (old_type)
1083
        {
1084
        case REG_DEP_OUTPUT:
1085
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1086
          break;
1087
 
1088
        case REG_DEP_ANTI:
1089
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1090
          break;
1091
 
1092
        case REG_DEP_CONTROL:
1093
          bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1094
          break;
1095
 
1096
        default:
1097
          gcc_unreachable ();
1098
        }
1099
    }
1100
 
1101
  set_dependency_caches (dep);
1102
}
1103
 
1104
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1105
static void
1106
change_spec_dep_to_hard (sd_iterator_def sd_it)
1107
{
1108
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1109
  dep_link_t link = DEP_NODE_BACK (node);
1110
  dep_t dep = DEP_NODE_DEP (node);
1111
  rtx elem = DEP_PRO (dep);
1112
  rtx insn = DEP_CON (dep);
1113
 
1114
  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1115
 
1116
  DEP_STATUS (dep) &= ~SPECULATIVE;
1117
 
1118
  if (true_dependency_cache != NULL)
1119
    /* Clear the cache entry.  */
1120
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1121
                      INSN_LUID (elem));
1122
}
1123
 
1124
/* Update DEP to incorporate information from NEW_DEP.
1125
   SD_IT points to DEP in case it should be moved to another list.
1126
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
1127
   data-speculative dependence should be updated.  */
1128
static enum DEPS_ADJUST_RESULT
1129
update_dep (dep_t dep, dep_t new_dep,
1130
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1131
            rtx mem1 ATTRIBUTE_UNUSED,
1132
            rtx mem2 ATTRIBUTE_UNUSED)
1133
{
1134
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1135
  enum reg_note old_type = DEP_TYPE (dep);
1136
  bool was_spec = dep_spec_p (dep);
1137
 
1138
  /* If this is a more restrictive type of dependence than the
1139
     existing one, then change the existing dependence to this
1140
     type.  */
1141
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
1142
    {
1143
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
1144
      res = DEP_CHANGED;
1145
    }
1146
 
1147
  if (current_sched_info->flags & USE_DEPS_LIST)
1148
    /* Update DEP_STATUS.  */
1149
    {
1150
      ds_t dep_status = DEP_STATUS (dep);
1151
      ds_t ds = DEP_STATUS (new_dep);
1152
      ds_t new_status = ds | dep_status;
1153
 
1154
      if (new_status & SPECULATIVE)
1155
        {
1156
          /* Either existing dep or a dep we're adding or both are
1157
             speculative.  */
1158
          if (!(ds & SPECULATIVE)
1159
              || !(dep_status & SPECULATIVE))
1160
            /* The new dep can't be speculative.  */
1161
            new_status &= ~SPECULATIVE;
1162
          else
1163
            {
1164
              /* Both are speculative.  Merge probabilities.  */
1165
              if (mem1 != NULL)
1166
                {
1167
                  dw_t dw;
1168
 
1169
                  dw = estimate_dep_weak (mem1, mem2);
1170
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1171
                }
1172
 
1173
              new_status = ds_merge (dep_status, ds);
1174
            }
1175
        }
1176
 
1177
      ds = new_status;
1178
 
1179
      if (dep_status != ds)
1180
        {
1181
          DEP_STATUS (dep) = ds;
1182
          res = DEP_CHANGED;
1183
        }
1184
    }
1185
 
1186
  if (was_spec && !dep_spec_p (dep))
1187
    /* The old dep was speculative, but now it isn't.  */
1188
    change_spec_dep_to_hard (sd_it);
1189
 
1190
  if (true_dependency_cache != NULL
1191
      && res == DEP_CHANGED)
1192
    update_dependency_caches (dep, old_type);
1193
 
1194
  return res;
1195
}
1196
 
1197
/* Add or update a dependence described by DEP.
1198
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
1199
   data speculation.
1200
 
1201
   The function returns a value indicating if an old entry has been changed
1202
   or a new entry has been added to insn's backward deps or nothing has
1203
   been updated at all.  */
1204
static enum DEPS_ADJUST_RESULT
1205
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1206
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1207
{
1208
  bool maybe_present_p = true;
1209
  bool present_p = false;
1210
 
1211
  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1212
              && DEP_PRO (new_dep) != DEP_CON (new_dep));
1213
 
1214
#ifdef ENABLE_CHECKING
1215
  check_dep (new_dep, mem1 != NULL);
1216
#endif
1217
 
1218
  if (true_dependency_cache != NULL)
1219
    {
1220
      switch (ask_dependency_caches (new_dep))
1221
        {
1222
        case DEP_PRESENT:
1223
          return DEP_PRESENT;
1224
 
1225
        case DEP_CHANGED:
1226
          maybe_present_p = true;
1227
          present_p = true;
1228
          break;
1229
 
1230
        case DEP_CREATED:
1231
          maybe_present_p = false;
1232
          present_p = false;
1233
          break;
1234
 
1235
        default:
1236
          gcc_unreachable ();
1237
          break;
1238
        }
1239
    }
1240
 
1241
  /* Check that we don't already have this dependence.  */
1242
  if (maybe_present_p)
1243
    {
1244
      dep_t present_dep;
1245
      sd_iterator_def sd_it;
1246
 
1247
      gcc_assert (true_dependency_cache == NULL || present_p);
1248
 
1249
      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1250
                                                  DEP_CON (new_dep),
1251
                                                  resolved_p, &sd_it);
1252
 
1253
      if (present_dep != NULL)
1254
        /* We found an existing dependency between ELEM and INSN.  */
1255
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1256
      else
1257
        /* We didn't find a dep; it shouldn't be present in the cache.  */
1258
        gcc_assert (!present_p);
1259
    }
1260
 
1261
  /* Might want to check one level of transitivity to save conses.
1262
     This check should be done in maybe_add_or_update_dep_1.
1263
     Since we made it to add_or_update_dep_1, we must create
1264
     (or update) a link.  */
1265
 
1266
  if (mem1 != NULL_RTX)
1267
    {
1268
      gcc_assert (sched_deps_info->generate_spec_deps);
1269
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1270
                                           estimate_dep_weak (mem1, mem2));
1271
    }
1272
 
1273
  sd_add_dep (new_dep, resolved_p);
1274
 
1275
  return DEP_CREATED;
1276
}
1277
 
1278
/* Initialize BACK_LIST_PTR with consumer's backward list and
1279
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1280
   initialize with lists that hold resolved deps.  */
1281
static void
1282
get_back_and_forw_lists (dep_t dep, bool resolved_p,
1283
                         deps_list_t *back_list_ptr,
1284
                         deps_list_t *forw_list_ptr)
1285
{
1286
  rtx con = DEP_CON (dep);
1287
 
1288
  if (!resolved_p)
1289
    {
1290
      if (dep_spec_p (dep))
1291
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1292
      else
1293
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1294
 
1295
      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1296
    }
1297
  else
1298
    {
1299
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1300
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1301
    }
1302
}
1303
 
1304
/* Add dependence described by DEP.
1305
   If RESOLVED_P is true treat the dependence as a resolved one.  */
1306
void
1307
sd_add_dep (dep_t dep, bool resolved_p)
1308
{
1309
  dep_node_t n = create_dep_node ();
1310
  deps_list_t con_back_deps;
1311
  deps_list_t pro_forw_deps;
1312
  rtx elem = DEP_PRO (dep);
1313
  rtx insn = DEP_CON (dep);
1314
 
1315
  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1316
 
1317
  if ((current_sched_info->flags & DO_SPECULATION) == 0
1318
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1319
    DEP_STATUS (dep) &= ~SPECULATIVE;
1320
 
1321
  copy_dep (DEP_NODE_DEP (n), dep);
1322
 
1323
  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1324
 
1325
  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1326
 
1327
#ifdef ENABLE_CHECKING
1328
  check_dep (dep, false);
1329
#endif
1330
 
1331
  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1332
 
1333
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1334
     in the bitmap caches of dependency information.  */
1335
  if (true_dependency_cache != NULL)
1336
    set_dependency_caches (dep);
1337
}
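/* A minimal usage sketch (pro and con are assumed to be distinct INSN_P
   insns from the current region): build the dep on the stack and hand it
   over, the way sd_copy_back_deps below does:

     dep_def _d, *d = &_d;
     init_dep (d, pro, con, REG_DEP_TRUE);
     sd_add_dep (d, false);
*/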
1338
 
1339
/* Add or update backward dependence between INSN and ELEM
1340
   with given type DEP_TYPE and dep_status DS.
1341
   This function is a convenience wrapper.  */
1342
enum DEPS_ADJUST_RESULT
1343
sd_add_or_update_dep (dep_t dep, bool resolved_p)
1344
{
1345
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1346
}
1347
 
1348
/* Resolve the dependence pointed to by SD_IT.
1349
   SD_IT will advance to the next element.  */
1350
void
1351
sd_resolve_dep (sd_iterator_def sd_it)
1352
{
1353
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1354
  dep_t dep = DEP_NODE_DEP (node);
1355
  rtx pro = DEP_PRO (dep);
1356
  rtx con = DEP_CON (dep);
1357
 
1358
  if (dep_spec_p (dep))
1359
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1360
                   INSN_RESOLVED_BACK_DEPS (con));
1361
  else
1362
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1363
                   INSN_RESOLVED_BACK_DEPS (con));
1364
 
1365
  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1366
                 INSN_RESOLVED_FORW_DEPS (pro));
1367
}
1368
 
1369
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
1370
   pointed to by SD_IT to unresolved state.  */
1371
void
1372
sd_unresolve_dep (sd_iterator_def sd_it)
1373
{
1374
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1375
  dep_t dep = DEP_NODE_DEP (node);
1376
  rtx pro = DEP_PRO (dep);
1377
  rtx con = DEP_CON (dep);
1378
 
1379
  if (dep_spec_p (dep))
1380
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1381
                   INSN_SPEC_BACK_DEPS (con));
1382
  else
1383
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1384
                   INSN_HARD_BACK_DEPS (con));
1385
 
1386
  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1387
                 INSN_FORW_DEPS (pro));
1388
}
1389
 
1390
/* Make TO depend on all of FROM's producers.
1391
   If RESOLVED_P is true add dependencies to the resolved lists.  */
1392
void
1393
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1394
{
1395
  sd_list_types_def list_type;
1396
  sd_iterator_def sd_it;
1397
  dep_t dep;
1398
 
1399
  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1400
 
1401
  FOR_EACH_DEP (from, list_type, sd_it, dep)
1402
    {
1403
      dep_def _new_dep, *new_dep = &_new_dep;
1404
 
1405
      copy_dep (new_dep, dep);
1406
      DEP_CON (new_dep) = to;
1407
      sd_add_dep (new_dep, resolved_p);
1408
    }
1409
}
1410
 
1411
/* Remove a dependency referred to by SD_IT.
1412
   SD_IT will point to the next dependence after removal.  */
1413
void
1414
sd_delete_dep (sd_iterator_def sd_it)
1415
{
1416
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1417
  dep_t dep = DEP_NODE_DEP (n);
1418
  rtx pro = DEP_PRO (dep);
1419
  rtx con = DEP_CON (dep);
1420
  deps_list_t con_back_deps;
1421
  deps_list_t pro_forw_deps;
1422
 
1423
  if (true_dependency_cache != NULL)
1424
    {
1425
      int elem_luid = INSN_LUID (pro);
1426
      int insn_luid = INSN_LUID (con);
1427
 
1428
      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1429
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1430
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1431
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1432
 
1433
      if (current_sched_info->flags & DO_SPECULATION)
1434
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1435
    }
1436
 
1437
  get_back_and_forw_lists (dep, sd_it.resolved_p,
1438
                           &con_back_deps, &pro_forw_deps);
1439
 
1440
  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1441
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1442
 
1443
  delete_dep_node (n);
1444
}
1445
 
1446
/* Dump size of the lists.  */
1447
#define DUMP_LISTS_SIZE (2)
1448
 
1449
/* Dump dependencies of the lists.  */
1450
#define DUMP_LISTS_DEPS (4)
1451
 
1452
/* Dump all information about the lists.  */
1453
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1454
 
1455
/* Dump deps_lists of INSN specified by TYPES to DUMP.
1456
   FLAGS is a bit mask specifying what information about the lists needs
1457
   to be printed.
1458
   If FLAGS has the very first bit set, then dump all information about
1459
   the lists and propagate this bit into the callee dump functions.  */
1460
static void
1461
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1462
{
1463
  sd_iterator_def sd_it;
1464
  dep_t dep;
1465
  int all;
1466
 
1467
  all = (flags & 1);
1468
 
1469
  if (all)
1470
    flags |= DUMP_LISTS_ALL;
1471
 
1472
  fprintf (dump, "[");
1473
 
1474
  if (flags & DUMP_LISTS_SIZE)
1475
    fprintf (dump, "%d; ", sd_lists_size (insn, types));
1476
 
1477
  if (flags & DUMP_LISTS_DEPS)
1478
    {
1479
      FOR_EACH_DEP (insn, types, sd_it, dep)
1480
        {
1481
          dump_dep (dump, dep, dump_dep_flags | all);
1482
          fprintf (dump, " ");
1483
        }
1484
    }
1485
}
1486
 
1487
/* Dump all information about deps_lists of INSN specified by TYPES
1488
   to STDERR.  */
1489
void
1490
sd_debug_lists (rtx insn, sd_list_types_def types)
1491
{
1492
  dump_lists (stderr, insn, types, 1);
1493
  fprintf (stderr, "\n");
1494
}
1495
 
1496
/* A wrapper around add_dependence_1, to add a dependence of CON on
1497
   PRO, with type DEP_TYPE.  This function implements special handling
1498
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
1499
   the type to REG_DEP_ANTI if we can determine that predication is
1500
   impossible; otherwise we add additional true dependencies on the
1501
   INSN_COND_DEPS list of the jump (which PRO must be).  */
1502
void
1503
add_dependence (rtx con, rtx pro, enum reg_note dep_type)
1504
{
1505
  if (dep_type == REG_DEP_CONTROL
1506
      && !(current_sched_info->flags & DO_PREDICATION))
1507
    dep_type = REG_DEP_ANTI;
1508
 
1509
  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1510
     so we must also make the insn dependent on the setter of the
1511
     condition.  */
1512
  if (dep_type == REG_DEP_CONTROL)
1513
    {
1514
      rtx real_pro = pro;
1515
      rtx other = real_insn_for_shadow (real_pro);
1516
      rtx cond;
1517
 
1518
      if (other != NULL_RTX)
1519
        real_pro = other;
1520
      cond = sched_get_reverse_condition_uncached (real_pro);
1521
      /* Verify that the insn does not use a different value in
1522
         the condition register than the one that was present at
1523
         the jump.  */
1524
      if (cond == NULL_RTX)
1525
        dep_type = REG_DEP_ANTI;
1526
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1527
        {
1528
          HARD_REG_SET uses;
1529
          CLEAR_HARD_REG_SET (uses);
1530
          note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1531
          if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1532
            dep_type = REG_DEP_ANTI;
1533
        }
1534
      if (dep_type == REG_DEP_CONTROL)
1535
        {
1536
          if (sched_verbose >= 5)
1537
            fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1538
                     INSN_UID (real_pro));
1539
          add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1540
                               REG_DEP_TRUE);
1541
        }
1542
    }
1543
 
1544
  add_dependence_1 (con, pro, dep_type);
1545
}
1546
 
1547
/* A convenience wrapper to operate on an entire list.  */
1548
 
1549
static void
1550
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1551
{
1552
  for (; list; list = XEXP (list, 1))
1553
    {
1554
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1555
        add_dependence (insn, XEXP (list, 0), dep_type);
1556
    }
1557
}
1558
 
1559
/* Similar, but free *LISTP at the same time, when the context
1560
   is not readonly.  */
1561
 
1562
static void
1563
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1564
                              int uncond, enum reg_note dep_type)
1565
{
1566
  rtx list, next;
1567
 
1568
  /* We don't want to short-circuit dependencies involving debug
1569
     insns, because they may cause actual dependencies to be
1570
     disregarded.  */
1571
  if (deps->readonly || DEBUG_INSN_P (insn))
1572
    {
1573
      add_dependence_list (insn, *listp, uncond, dep_type);
1574
      return;
1575
    }
1576
 
1577
  for (list = *listp, *listp = NULL; list ; list = next)
1578
    {
1579
      next = XEXP (list, 1);
1580
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1581
        add_dependence (insn, XEXP (list, 0), dep_type);
1582
      free_INSN_LIST_node (list);
1583
    }
1584
}
1585
 
1586
/* Remove all occurrences of INSN from LIST.  Return the number of
1587
   occurrences removed.  */
1588
 
1589
static int
1590
remove_from_dependence_list (rtx insn, rtx* listp)
1591
{
1592
  int removed = 0;
1593
 
1594
  while (*listp)
1595
    {
1596
      if (XEXP (*listp, 0) == insn)
1597
        {
1598
          remove_free_INSN_LIST_node (listp);
1599
          removed++;
1600
          continue;
1601
        }
1602
 
1603
      listp = &XEXP (*listp, 1);
1604
    }
1605
 
1606
  return removed;
1607
}
1608
 
1609
/* Same as above, but process two lists at once.  */
1610
static int
1611
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1612
{
1613
  int removed = 0;
1614
 
1615
  while (*listp)
1616
    {
1617
      if (XEXP (*listp, 0) == insn)
1618
        {
1619
          remove_free_INSN_LIST_node (listp);
1620
          remove_free_EXPR_LIST_node (exprp);
1621
          removed++;
1622
          continue;
1623
        }
1624
 
1625
      listp = &XEXP (*listp, 1);
1626
      exprp = &XEXP (*exprp, 1);
1627
    }
1628
 
1629
  return removed;
1630
}
1631
 
1632
/* Clear all dependencies for an insn.  */
1633
static void
1634
delete_all_dependences (rtx insn)
1635
{
1636
  sd_iterator_def sd_it;
1637
  dep_t dep;
1638
 
1639
  /* The loop below could be optimized to clear the caches and back_deps
1640
     in one call, but that would duplicate code from
1641
     delete_dep ().  */
1642
 
1643
  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1644
       sd_iterator_cond (&sd_it, &dep);)
1645
    sd_delete_dep (sd_it);
1646
}
1647
 
1648
/* All insns in a scheduling group except the first should only have
1649
   dependencies on the previous insn in the group.  So we find the
1650
   first instruction in the scheduling group by walking the dependence
1651
   chains backwards. Then we add the dependencies for the group to
1652
   the previous nonnote insn.  */
1653
 
1654
static void
1655
fixup_sched_groups (rtx insn)
1656
{
1657
  sd_iterator_def sd_it;
1658
  dep_t dep;
1659
  rtx prev_nonnote;
1660
 
1661
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1662
    {
1663
      rtx i = insn;
1664
      rtx pro = DEP_PRO (dep);
1665
 
1666
      do
1667
        {
1668
          i = prev_nonnote_insn (i);
1669
 
1670
          if (pro == i)
1671
            goto next_link;
1672
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1673
 
1674
      if (! sched_insns_conditions_mutex_p (i, pro))
1675
        add_dependence (i, pro, DEP_TYPE (dep));
1676
    next_link:;
1677
    }
1678
 
1679
  delete_all_dependences (insn);
1680
 
1681
  prev_nonnote = prev_nonnote_nondebug_insn (insn);
1682
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1683
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1684
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1685
}
1686
 
1687
/* Process an insn's memory dependencies.  There are four kinds of
1688
   dependencies:
1689
 
1690
   (0) read dependence: read follows read
1691
   (1) true dependence: read follows write
1692
   (2) output dependence: write follows write
1693
   (3) anti dependence: write follows read
1694
 
1695
   We are careful to build only dependencies which actually exist, and
1696
   use transitivity to avoid building too many links.  */
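/* For instance, with

     insn A:  store to [mem]
     insn B:  load from [mem]
     insn C:  store to [mem]

   B truly depends on A (read after write), C has an output dependence on A
   (write after write) and an anti dependence on B (write after read), while
   a second load after B (read after read) needs no ordering link.  */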
1697
 
1698
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1699
   The MEM is a memory reference contained within INSN, which we are saving
1700
   so that we can do memory aliasing on it.  */
1701
 
1702
static void
1703
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1704
                         rtx insn, rtx mem)
1705
{
1706
  rtx *insn_list;
1707
  rtx *mem_list;
1708
  rtx link;
1709
 
1710
  gcc_assert (!deps->readonly);
1711
  if (read_p)
1712
    {
1713
      insn_list = &deps->pending_read_insns;
1714
      mem_list = &deps->pending_read_mems;
1715
      if (!DEBUG_INSN_P (insn))
1716
        deps->pending_read_list_length++;
1717
    }
1718
  else
1719
    {
1720
      insn_list = &deps->pending_write_insns;
1721
      mem_list = &deps->pending_write_mems;
1722
      deps->pending_write_list_length++;
1723
    }
1724
 
1725
  link = alloc_INSN_LIST (insn, *insn_list);
1726
  *insn_list = link;
1727
 
1728
  if (sched_deps_info->use_cselib)
1729
    {
1730
      mem = shallow_copy_rtx (mem);
1731
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1732
                                                        GET_MODE (mem), insn);
1733
    }
1734
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1735
  *mem_list = link;
1736
}
1737
 
1738
/* Make a dependency between every memory reference on the pending lists
1739
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1740
   dependencies for a read operation, similarly with FOR_WRITE.  */
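/* A sketch of the net effect: after a flush, INSN carries a dependence on
   every insn that was on the pending read/write lists, those lists are
   emptied, and last_pending_memory_flush holds just INSN, so later memory
   references need only one link back to INSN rather than one link per
   flushed access.  */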
1741
 
1742
static void
1743
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1744
                     int for_write)
1745
{
1746
  if (for_write)
1747
    {
1748
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1749
                                    1, REG_DEP_ANTI);
1750
      if (!deps->readonly)
1751
        {
1752
          free_EXPR_LIST_list (&deps->pending_read_mems);
1753
          deps->pending_read_list_length = 0;
1754
        }
1755
    }
1756
 
1757
  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1758
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1759
 
1760
  add_dependence_list_and_free (deps, insn,
1761
                                &deps->last_pending_memory_flush, 1,
1762
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1763
 
1764
  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1765
                                REG_DEP_ANTI);
1766
 
1767
  if (!deps->readonly)
1768
    {
1769
      free_EXPR_LIST_list (&deps->pending_write_mems);
1770
      deps->pending_write_list_length = 0;
1771
 
1772
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1773
      deps->pending_flush_length = 1;
1774
    }
1775
}
1776
 
1777
/* The instruction whose dependencies we are analyzing.  */
1778
static rtx cur_insn = NULL_RTX;
1779
 
1780
/* Implement hooks for haifa scheduler.  */
1781
 
1782
static void
1783
haifa_start_insn (rtx insn)
1784
{
1785
  gcc_assert (insn && !cur_insn);
1786
 
1787
  cur_insn = insn;
1788
}
1789
 
1790
static void
1791
haifa_finish_insn (void)
1792
{
1793
  cur_insn = NULL;
1794
}
1795
 
1796
void
1797
haifa_note_reg_set (int regno)
1798
{
1799
  SET_REGNO_REG_SET (reg_pending_sets, regno);
1800
}
1801
 
1802
void
1803
haifa_note_reg_clobber (int regno)
1804
{
1805
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1806
}
1807
 
1808
void
1809
haifa_note_reg_use (int regno)
1810
{
1811
  SET_REGNO_REG_SET (reg_pending_uses, regno);
1812
}
1813
 
1814
static void
1815
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1816
{
1817
  if (!(ds & SPECULATIVE))
1818
    {
1819
      mem = NULL_RTX;
1820
      pending_mem = NULL_RTX;
1821
    }
1822
  else
1823
    gcc_assert (ds & BEGIN_DATA);
1824
 
1825
  {
1826
    dep_def _dep, *dep = &_dep;
1827
 
1828
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1829
                current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1830
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1831
  }
1832
 
1833
}
1834
 
1835
static void
1836
haifa_note_dep (rtx elem, ds_t ds)
1837
{
1838
  dep_def _dep;
1839
  dep_t dep = &_dep;
1840
 
1841
  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1842
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1843
}
1844
 
1845
static void
1846
note_reg_use (int r)
1847
{
1848
  if (sched_deps_info->note_reg_use)
1849
    sched_deps_info->note_reg_use (r);
1850
}
1851
 
1852
static void
1853
note_reg_set (int r)
1854
{
1855
  if (sched_deps_info->note_reg_set)
1856
    sched_deps_info->note_reg_set (r);
1857
}
1858
 
1859
static void
1860
note_reg_clobber (int r)
1861
{
1862
  if (sched_deps_info->note_reg_clobber)
1863
    sched_deps_info->note_reg_clobber (r);
1864
}
1865
 
1866
static void
1867
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1868
{
1869
  if (sched_deps_info->note_mem_dep)
1870
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
1871
}
1872
 
1873
static void
1874
note_dep (rtx e, ds_t ds)
1875
{
1876
  if (sched_deps_info->note_dep)
1877
    sched_deps_info->note_dep (e, ds);
1878
}
1879
 
1880
/* Return the reg_note corresponding to DS.  */
1881
enum reg_note
1882
ds_to_dt (ds_t ds)
1883
{
1884
  if (ds & DEP_TRUE)
1885
    return REG_DEP_TRUE;
1886
  else if (ds & DEP_OUTPUT)
1887
    return REG_DEP_OUTPUT;
1888
  else if (ds & DEP_ANTI)
1889
    return REG_DEP_ANTI;
1890
  else
1891
    {
1892
      gcc_assert (ds & DEP_CONTROL);
1893
      return REG_DEP_CONTROL;
1894
    }
1895
}
1896
 
1897
 
1898
 
1899
/* Functions for computation of info needed for register pressure
1900
   sensitive insn scheduling.  */
1901
 
1902
 
1903
/* Allocate and return reg_use_data structure for REGNO and INSN.  */
1904
static struct reg_use_data *
1905
create_insn_reg_use (int regno, rtx insn)
1906
{
1907
  struct reg_use_data *use;
1908
 
1909
  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1910
  use->regno = regno;
1911
  use->insn = insn;
1912
  use->next_insn_use = INSN_REG_USE_LIST (insn);
1913
  INSN_REG_USE_LIST (insn) = use;
1914
  return use;
1915
}
1916
 
1917
/* Allocate and return reg_set_data structure for REGNO and INSN.  */
1918
static struct reg_set_data *
1919
create_insn_reg_set (int regno, rtx insn)
1920
{
1921
  struct reg_set_data *set;
1922
 
1923
  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1924
  set->regno = regno;
1925
  set->insn = insn;
1926
  set->next_insn_set = INSN_REG_SET_LIST (insn);
1927
  INSN_REG_SET_LIST (insn) = set;
1928
  return set;
1929
}
1930
 
1931
/* Set up insn register uses for INSN and dependency context DEPS.  */
1932
static void
1933
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1934
{
1935
  unsigned i;
1936
  reg_set_iterator rsi;
1937
  rtx list;
1938
  struct reg_use_data *use, *use2, *next;
1939
  struct deps_reg *reg_last;
1940
 
1941
  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1942
    {
1943
      if (i < FIRST_PSEUDO_REGISTER
1944
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1945
        continue;
1946
 
1947
      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1948
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
1949
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1950
        /* Ignore use which is not dying.  */
1951
        continue;
1952
 
1953
      use = create_insn_reg_use (i, insn);
1954
      use->next_regno_use = use;
1955
      reg_last = &deps->reg_last[i];
1956
 
1957
      /* Create the cyclic list of uses.  */
1958
      for (list = reg_last->uses; list; list = XEXP (list, 1))
1959
        {
1960
          use2 = create_insn_reg_use (i, XEXP (list, 0));
1961
          next = use->next_regno_use;
1962
          use->next_regno_use = use2;
1963
          use2->next_regno_use = next;
1964
        }
1965
    }
1966
}
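/* For illustration: if register 42 dies in INSN and reg_last->uses records
   two earlier uses U1 and U2, setup_insn_reg_uses links the three
   reg_use_data records into a single ring through next_regno_use, e.g.

     use (INSN) -> use (U1) -> use (U2) -> use (INSN) -> ...

   The exact order around the ring depends on the order of reg_last->uses;
   this is only a sketch.  */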
1967
 
1968
/* Register pressure info for the currently processed insn.  */
1969
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1970
 
1971
/* Return TRUE if INSN has the use structure for REGNO.  */
1972
static bool
1973
insn_use_p (rtx insn, int regno)
1974
{
1975
  struct reg_use_data *use;
1976
 
1977
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1978
    if (use->regno == regno)
1979
      return true;
1980
  return false;
1981
}
1982
 
1983
/* Update the register pressure info after birth of pseudo register REGNO
1984
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
1985
   the register is clobbered or unused after the insn.  */
1986
static void
1987
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1988
{
1989
  int incr, new_incr;
1990
  enum reg_class cl;
1991
 
1992
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1993
  cl = sched_regno_pressure_class[regno];
1994
  if (cl != NO_REGS)
1995
    {
1996
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1997
      if (clobber_p)
1998
        {
1999
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
2000
          reg_pressure_info[cl].clobber_increase = new_incr;
2001
        }
2002
      else if (unused_p)
2003
        {
2004
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2005
          reg_pressure_info[cl].unused_set_increase = new_incr;
2006
        }
2007
      else
2008
        {
2009
          new_incr = reg_pressure_info[cl].set_increase + incr;
2010
          reg_pressure_info[cl].set_increase = new_incr;
2011
          if (! insn_use_p (insn, regno))
2012
            reg_pressure_info[cl].change += incr;
2013
          create_insn_reg_set (regno, insn);
2014
        }
2015
      gcc_assert (new_incr < (1 << INCREASE_BITS));
2016
    }
2017
}
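/* A small worked example (hypothetical numbers): for a pseudo whose
   pressure class CL satisfies ira_reg_class_max_nregs[CL][mode] == 2, a
   plain SET with no use of the register in the same insn gives
   set_increase += 2 and change += 2 for CL; a CLOBBER instead gives
   clobber_increase += 2 and leaves change alone.  */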
2018
 
2019
/* Like mark_insn_pseudo_birth except that NREGS says how many
2020
   hard registers are involved in the birth.  */
2021
static void
2022
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2023
                            bool clobber_p, bool unused_p)
2024
{
2025
  enum reg_class cl;
2026
  int new_incr, last = regno + nregs;
2027
 
2028
  while (regno < last)
2029
    {
2030
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2031
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2032
        {
2033
          cl = sched_regno_pressure_class[regno];
2034
          if (cl != NO_REGS)
2035
            {
2036
              if (clobber_p)
2037
                {
2038
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2039
                  reg_pressure_info[cl].clobber_increase = new_incr;
2040
                }
2041
              else if (unused_p)
2042
                {
2043
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2044
                  reg_pressure_info[cl].unused_set_increase = new_incr;
2045
                }
2046
              else
2047
                {
2048
                  new_incr = reg_pressure_info[cl].set_increase + 1;
2049
                  reg_pressure_info[cl].set_increase = new_incr;
2050
                  if (! insn_use_p (insn, regno))
2051
                    reg_pressure_info[cl].change += 1;
2052
                  create_insn_reg_set (regno, insn);
2053
                }
2054
              gcc_assert (new_incr < (1 << INCREASE_BITS));
2055
            }
2056
        }
2057
      regno++;
2058
    }
2059
}
2060
 
2061
/* Update the register pressure info after birth of pseudo or hard
2062
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
2063
   respectively whether the register is clobbered or unused after the
2064
   insn.  */
2065
static void
2066
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2067
{
2068
  int regno;
2069
 
2070
  if (GET_CODE (reg) == SUBREG)
2071
    reg = SUBREG_REG (reg);
2072
 
2073
  if (! REG_P (reg))
2074
    return;
2075
 
2076
  regno = REGNO (reg);
2077
  if (regno < FIRST_PSEUDO_REGISTER)
2078
    mark_insn_hard_regno_birth (insn, regno,
2079
                                hard_regno_nregs[regno][GET_MODE (reg)],
2080
                                clobber_p, unused_p);
2081
  else
2082
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2083
}
2084
 
2085
/* Update the register pressure info after death of pseudo register
2086
   REGNO.  */
2087
static void
2088
mark_pseudo_death (int regno)
2089
{
2090
  int incr;
2091
  enum reg_class cl;
2092
 
2093
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2094
  cl = sched_regno_pressure_class[regno];
2095
  if (cl != NO_REGS)
2096
    {
2097
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2098
      reg_pressure_info[cl].change -= incr;
2099
    }
2100
}
2101
 
2102
/* Like mark_pseudo_death except that NREGS says how many hard
2103
   registers are involved in the death.  */
2104
static void
2105
mark_hard_regno_death (int regno, int nregs)
2106
{
2107
  enum reg_class cl;
2108
  int last = regno + nregs;
2109
 
2110
  while (regno < last)
2111
    {
2112
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2113
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2114
        {
2115
          cl = sched_regno_pressure_class[regno];
2116
          if (cl != NO_REGS)
2117
            reg_pressure_info[cl].change -= 1;
2118
        }
2119
      regno++;
2120
    }
2121
}
2122
 
2123
/* Update the register pressure info after death of pseudo or hard
2124
   register REG.  */
2125
static void
2126
mark_reg_death (rtx reg)
2127
{
2128
  int regno;
2129
 
2130
  if (GET_CODE (reg) == SUBREG)
2131
    reg = SUBREG_REG (reg);
2132
 
2133
  if (! REG_P (reg))
2134
    return;
2135
 
2136
  regno = REGNO (reg);
2137
  if (regno < FIRST_PSEUDO_REGISTER)
2138
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2139
  else
2140
    mark_pseudo_death (regno);
2141
}
2142
 
2143
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
2144
static void
2145
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2146
{
2147
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
2148
    return;
2149
  mark_insn_reg_birth
2150
    ((rtx) data, reg, false,
2151
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2152
}
2153
 
2154
/* Like mark_insn_reg_store except that it notices just CLOBBERs and ignores SETs.  */
2155
static void
2156
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2157
{
2158
  if (GET_CODE (setter) == CLOBBER)
2159
    mark_insn_reg_birth ((rtx) data, reg, true, false);
2160
}
2161
 
2162
/* Set up reg pressure info related to INSN.  */
2163
void
2164
init_insn_reg_pressure_info (rtx insn)
2165
{
2166
  int i, len;
2167
  enum reg_class cl;
2168
  static struct reg_pressure_data *pressure_info;
2169
  rtx link;
2170
 
2171
  gcc_assert (sched_pressure_p);
2172
 
2173
  if (! INSN_P (insn))
2174
    return;
2175
 
2176
  for (i = 0; i < ira_pressure_classes_num; i++)
2177
    {
2178
      cl = ira_pressure_classes[i];
2179
      reg_pressure_info[cl].clobber_increase = 0;
2180
      reg_pressure_info[cl].set_increase = 0;
2181
      reg_pressure_info[cl].unused_set_increase = 0;
2182
      reg_pressure_info[cl].change = 0;
2183
    }
2184
 
2185
  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2186
 
2187
  note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2188
 
2189
#ifdef AUTO_INC_DEC
2190
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2191
    if (REG_NOTE_KIND (link) == REG_INC)
2192
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2193
#endif
2194
 
2195
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2196
    if (REG_NOTE_KIND (link) == REG_DEAD)
2197
      mark_reg_death (XEXP (link, 0));
2198
 
2199
  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2200
  pressure_info
2201
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2202
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2203
                                                  * sizeof (int), 1);
2204
  for (i = 0; i < ira_pressure_classes_num; i++)
2205
    {
2206
      cl = ira_pressure_classes[i];
2207
      pressure_info[i].clobber_increase
2208
        = reg_pressure_info[cl].clobber_increase;
2209
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2210
      pressure_info[i].unused_set_increase
2211
        = reg_pressure_info[cl].unused_set_increase;
2212
      pressure_info[i].change = reg_pressure_info[cl].change;
2213
    }
2214
}
2215
 
2216
 
2217
 
2218
 
2219
/* Internal variable for sched_analyze_[12] () functions.
2220
   If it is nonzero, this means that sched_analyze_[12] looks
2221
   at the outermost (toplevel) SET.  */
2222
static bool can_start_lhs_rhs_p;
2223
 
2224
/* Extend reg info for the deps context DEPS given that
2225
   we have just generated a register numbered REGNO.  */
2226
static void
2227
extend_deps_reg_info (struct deps_desc *deps, int regno)
2228
{
2229
  int max_regno = regno + 1;
2230
 
2231
  gcc_assert (!reload_completed);
2232
 
2233
  /* In a readonly context, it would not hurt to extend info,
2234
     but it should not be needed.  */
2235
  if (reload_completed && deps->readonly)
2236
    {
2237
      deps->max_reg = max_regno;
2238
      return;
2239
    }
2240
 
2241
  if (max_regno > deps->max_reg)
2242
    {
2243
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2244
                                   max_regno);
2245
      memset (&deps->reg_last[deps->max_reg],
2246
              0, (max_regno - deps->max_reg)
2247
              * sizeof (struct deps_reg));
2248
      deps->max_reg = max_regno;
2249
    }
2250
}
2251
 
2252
/* Extends REG_INFO_P if needed.  */
2253
void
2254
maybe_extend_reg_info_p (void)
2255
{
2256
  /* Extend REG_INFO_P, if needed.  */
2257
  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2258
    {
2259
      size_t new_reg_info_p_size = max_regno + 128;
2260
 
2261
      gcc_assert (!reload_completed && sel_sched_p ());
2262
 
2263
      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2264
                                                    new_reg_info_p_size,
2265
                                                    reg_info_p_size,
2266
                                                    sizeof (*reg_info_p));
2267
      reg_info_p_size = new_reg_info_p_size;
2268
    }
2269
}
2270
 
2271
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2272
   The type of the reference is specified by REF and can be SET,
2273
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
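/* For example (illustrative): a SET of a hard register whose mode spans
   hard_regno_nregs == 2 registers results in note_reg_set being called
   for both REGNO and REGNO + 1, so each covered hard register is tracked
   separately.  */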
2274
 
2275
static void
2276
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2277
                   enum rtx_code ref, rtx insn)
2278
{
2279
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2280
  if (!reload_completed && sel_sched_p ()
2281
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2282
    extend_deps_reg_info (deps, regno);
2283
 
2284
  maybe_extend_reg_info_p ();
2285
 
2286
  /* A hard reg in a wide mode may really be multiple registers.
2287
     If so, mark all of them just like the first.  */
2288
  if (regno < FIRST_PSEUDO_REGISTER)
2289
    {
2290
      int i = hard_regno_nregs[regno][mode];
2291
      if (ref == SET)
2292
        {
2293
          while (--i >= 0)
2294
            note_reg_set (regno + i);
2295
        }
2296
      else if (ref == USE)
2297
        {
2298
          while (--i >= 0)
2299
            note_reg_use (regno + i);
2300
        }
2301
      else
2302
        {
2303
          while (--i >= 0)
2304
            note_reg_clobber (regno + i);
2305
        }
2306
    }
2307
 
2308
  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2309
     it does not reload.  Ignore these as they have served their
2310
     purpose already.  */
2311
  else if (regno >= deps->max_reg)
2312
    {
2313
      enum rtx_code code = GET_CODE (PATTERN (insn));
2314
      gcc_assert (code == USE || code == CLOBBER);
2315
    }
2316
 
2317
  else
2318
    {
2319
      if (ref == SET)
2320
        note_reg_set (regno);
2321
      else if (ref == USE)
2322
        note_reg_use (regno);
2323
      else
2324
        note_reg_clobber (regno);
2325
 
2326
      /* Pseudos that are REG_EQUIV to something may be replaced
2327
         by that during reloading.  We need only add dependencies for
2328
        the address in the REG_EQUIV note.  */
2329
      if (!reload_completed && get_reg_known_equiv_p (regno))
2330
        {
2331
          rtx t = get_reg_known_value (regno);
2332
          if (MEM_P (t))
2333
            sched_analyze_2 (deps, XEXP (t, 0), insn);
2334
        }
2335
 
2336
      /* Don't let it cross a call after scheduling if it doesn't
2337
         already cross one.  */
2338
      if (REG_N_CALLS_CROSSED (regno) == 0)
2339
        {
2340
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2341
            deps->sched_before_next_call
2342
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2343
          else
2344
            add_dependence_list (insn, deps->last_function_call, 1,
2345
                                 REG_DEP_ANTI);
2346
        }
2347
    }
2348
}
2349
 
2350
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2351
   rtx, X, creating all dependencies generated by the write to the
2352
   destination of X, and reads of everything mentioned.  */
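/* A hedged example of what this does for a store (set (mem (reg A))
   (reg B)): the destination is memory, so the insn gains anti dependences
   on pending reads that may alias it and output dependences on pending
   writes, and is appended to the pending write list; the address (reg A)
   and the source (reg B) are then analyzed as reads via sched_analyze_2.  */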
2353
 
2354
static void
2355
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2356
{
2357
  rtx dest = XEXP (x, 0);
2358
  enum rtx_code code = GET_CODE (x);
2359
  bool cslr_p = can_start_lhs_rhs_p;
2360
 
2361
  can_start_lhs_rhs_p = false;
2362
 
2363
  gcc_assert (dest);
2364
  if (dest == 0)
2365
    return;
2366
 
2367
  if (cslr_p && sched_deps_info->start_lhs)
2368
    sched_deps_info->start_lhs (dest);
2369
 
2370
  if (GET_CODE (dest) == PARALLEL)
2371
    {
2372
      int i;
2373
 
2374
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2375
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2376
          sched_analyze_1 (deps,
2377
                           gen_rtx_CLOBBER (VOIDmode,
2378
                                            XEXP (XVECEXP (dest, 0, i), 0)),
2379
                           insn);
2380
 
2381
      if (cslr_p && sched_deps_info->finish_lhs)
2382
        sched_deps_info->finish_lhs ();
2383
 
2384
      if (code == SET)
2385
        {
2386
          can_start_lhs_rhs_p = cslr_p;
2387
 
2388
          sched_analyze_2 (deps, SET_SRC (x), insn);
2389
 
2390
          can_start_lhs_rhs_p = false;
2391
        }
2392
 
2393
      return;
2394
    }
2395
 
2396
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2397
         || GET_CODE (dest) == ZERO_EXTRACT)
2398
    {
2399
      if (GET_CODE (dest) == STRICT_LOW_PART
2400
         || GET_CODE (dest) == ZERO_EXTRACT
2401
         || df_read_modify_subreg_p (dest))
2402
        {
2403
          /* These both read and modify the result.  We must handle
2404
             them as writes to get proper dependencies for following
2405
             instructions.  We must handle them as reads to get proper
2406
             dependencies from this to previous instructions.
2407
             Thus we need to call sched_analyze_2.  */
2408
 
2409
          sched_analyze_2 (deps, XEXP (dest, 0), insn);
2410
        }
2411
      if (GET_CODE (dest) == ZERO_EXTRACT)
2412
        {
2413
          /* The second and third arguments are values read by this insn.  */
2414
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
2415
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
2416
        }
2417
      dest = XEXP (dest, 0);
2418
    }
2419
 
2420
  if (REG_P (dest))
2421
    {
2422
      int regno = REGNO (dest);
2423
      enum machine_mode mode = GET_MODE (dest);
2424
 
2425
      sched_analyze_reg (deps, regno, mode, code, insn);
2426
 
2427
#ifdef STACK_REGS
2428
      /* Treat all writes to a stack register as modifying the TOS.  */
2429
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2430
        {
2431
          /* Avoid analyzing the same register twice.  */
2432
          if (regno != FIRST_STACK_REG)
2433
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2434
 
2435
          add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2436
                               FIRST_STACK_REG);
2437
        }
2438
#endif
2439
    }
2440
  else if (MEM_P (dest))
2441
    {
2442
      /* Writing memory.  */
2443
      rtx t = dest;
2444
 
2445
      if (sched_deps_info->use_cselib)
2446
        {
2447
          enum machine_mode address_mode
2448
            = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2449
 
2450
          t = shallow_copy_rtx (dest);
2451
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2452
                                   GET_MODE (t), insn);
2453
          XEXP (t, 0)
2454
            = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2455
                                                insn);
2456
        }
2457
      t = canon_rtx (t);
2458
 
2459
      /* Pending lists can't get larger with a readonly context.  */
2460
      if (!deps->readonly
2461
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
2462
              > MAX_PENDING_LIST_LENGTH))
2463
        {
2464
          /* Flush all pending reads and writes to prevent the pending lists
2465
             from getting any larger.  Insn scheduling runs too slowly when
2466
             these lists get long.  When compiling GCC with itself,
2467
             this flush occurs 8 times for sparc, and 10 times for m88k using
2468
             the default value of 32.  */
2469
          flush_pending_lists (deps, insn, false, true);
2470
        }
2471
      else
2472
        {
2473
          rtx pending, pending_mem;
2474
 
2475
          pending = deps->pending_read_insns;
2476
          pending_mem = deps->pending_read_mems;
2477
          while (pending)
2478
            {
2479
              if (anti_dependence (XEXP (pending_mem, 0), t)
2480
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2481
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2482
                              DEP_ANTI);
2483
 
2484
              pending = XEXP (pending, 1);
2485
              pending_mem = XEXP (pending_mem, 1);
2486
            }
2487
 
2488
          pending = deps->pending_write_insns;
2489
          pending_mem = deps->pending_write_mems;
2490
          while (pending)
2491
            {
2492
              if (output_dependence (XEXP (pending_mem, 0), t)
2493
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2494
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2495
                              DEP_OUTPUT);
2496
 
2497
              pending = XEXP (pending, 1);
2498
              pending_mem = XEXP (pending_mem, 1);
2499
            }
2500
 
2501
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2502
                               REG_DEP_ANTI);
2503
          add_dependence_list (insn, deps->pending_jump_insns, 1,
2504
                               REG_DEP_CONTROL);
2505
 
2506
          if (!deps->readonly)
2507
            add_insn_mem_dependence (deps, false, insn, dest);
2508
        }
2509
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
2510
    }
2511
 
2512
  if (cslr_p && sched_deps_info->finish_lhs)
2513
    sched_deps_info->finish_lhs ();
2514
 
2515
  /* Analyze reads.  */
2516
  if (GET_CODE (x) == SET)
2517
    {
2518
      can_start_lhs_rhs_p = cslr_p;
2519
 
2520
      sched_analyze_2 (deps, SET_SRC (x), insn);
2521
 
2522
      can_start_lhs_rhs_p = false;
2523
    }
2524
}
2525
 
2526
/* Analyze the uses of memory and registers in rtx X in INSN.  */
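/* Sketch for the MEM case below: a read (mem (reg A)) in INSN gains a true
   dependence on any pending store that may alias it, a dependence on any
   pending read for which read_dependence holds, and is itself appended to
   the pending read list so that later stores can depend on it.  */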
2527
static void
2528
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2529
{
2530
  int i;
2531
  int j;
2532
  enum rtx_code code;
2533
  const char *fmt;
2534
  bool cslr_p = can_start_lhs_rhs_p;
2535
 
2536
  can_start_lhs_rhs_p = false;
2537
 
2538
  gcc_assert (x);
2539
  if (x == 0)
2540
    return;
2541
 
2542
  if (cslr_p && sched_deps_info->start_rhs)
2543
    sched_deps_info->start_rhs (x);
2544
 
2545
  code = GET_CODE (x);
2546
 
2547
  switch (code)
2548
    {
2549
    case CONST_INT:
2550
    case CONST_DOUBLE:
2551
    case CONST_FIXED:
2552
    case CONST_VECTOR:
2553
    case SYMBOL_REF:
2554
    case CONST:
2555
    case LABEL_REF:
2556
      /* Ignore constants.  */
2557
      if (cslr_p && sched_deps_info->finish_rhs)
2558
        sched_deps_info->finish_rhs ();
2559
 
2560
      return;
2561
 
2562
#ifdef HAVE_cc0
2563
    case CC0:
2564
      /* User of CC0 depends on immediately preceding insn.  */
2565
      SCHED_GROUP_P (insn) = 1;
2566
       /* Don't move CC0 setter to another block (it can set up the
2567
        same flag for previous CC0 users, which is safe).  */
2568
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2569
 
2570
      if (cslr_p && sched_deps_info->finish_rhs)
2571
        sched_deps_info->finish_rhs ();
2572
 
2573
      return;
2574
#endif
2575
 
2576
    case REG:
2577
      {
2578
        int regno = REGNO (x);
2579
        enum machine_mode mode = GET_MODE (x);
2580
 
2581
        sched_analyze_reg (deps, regno, mode, USE, insn);
2582
 
2583
#ifdef STACK_REGS
2584
      /* Treat all reads of a stack register as modifying the TOS.  */
2585
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2586
        {
2587
          /* Avoid analyzing the same register twice.  */
2588
          if (regno != FIRST_STACK_REG)
2589
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2590
          sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2591
        }
2592
#endif
2593
 
2594
        if (cslr_p && sched_deps_info->finish_rhs)
2595
          sched_deps_info->finish_rhs ();
2596
 
2597
        return;
2598
      }
2599
 
2600
    case MEM:
2601
      {
2602
        /* Reading memory.  */
2603
        rtx u;
2604
        rtx pending, pending_mem;
2605
        rtx t = x;
2606
 
2607
        if (sched_deps_info->use_cselib)
2608
          {
2609
            enum machine_mode address_mode
2610
              = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2611
 
2612
            t = shallow_copy_rtx (t);
2613
            cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2614
                                     GET_MODE (t), insn);
2615
            XEXP (t, 0)
2616
              = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2617
                                                  insn);
2618
          }
2619
 
2620
        if (!DEBUG_INSN_P (insn))
2621
          {
2622
            t = canon_rtx (t);
2623
            pending = deps->pending_read_insns;
2624
            pending_mem = deps->pending_read_mems;
2625
            while (pending)
2626
              {
2627
                if (read_dependence (XEXP (pending_mem, 0), t)
2628
                    && ! sched_insns_conditions_mutex_p (insn,
2629
                                                         XEXP (pending, 0)))
2630
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2631
                                DEP_ANTI);
2632
 
2633
                pending = XEXP (pending, 1);
2634
                pending_mem = XEXP (pending_mem, 1);
2635
              }
2636
 
2637
            pending = deps->pending_write_insns;
2638
            pending_mem = deps->pending_write_mems;
2639
            while (pending)
2640
              {
2641
                if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2642
                    && ! sched_insns_conditions_mutex_p (insn,
2643
                                                         XEXP (pending, 0)))
2644
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2645
                                sched_deps_info->generate_spec_deps
2646
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2647
 
2648
                pending = XEXP (pending, 1);
2649
                pending_mem = XEXP (pending_mem, 1);
2650
              }
2651
 
2652
            for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2653
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2654
 
2655
            for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2656
              if (deps_may_trap_p (x))
2657
                {
2658
                  if ((sched_deps_info->generate_spec_deps)
2659
                      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2660
                    {
2661
                      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2662
                                              MAX_DEP_WEAK);
2663
 
2664
                      note_dep (XEXP (u, 0), ds);
2665
                    }
2666
                  else
2667
                    add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2668
                }
2669
          }
2670
 
2671
        /* Always add these dependencies to pending_reads, since
2672
           this insn may be followed by a write.  */
2673
        if (!deps->readonly)
2674
          add_insn_mem_dependence (deps, true, insn, x);
2675
 
2676
        sched_analyze_2 (deps, XEXP (x, 0), insn);
2677
 
2678
        if (cslr_p && sched_deps_info->finish_rhs)
2679
          sched_deps_info->finish_rhs ();
2680
 
2681
        return;
2682
      }
2683
 
2684
    /* Force pending stores to memory in case a trap handler needs them.  */
2685
    case TRAP_IF:
2686
      flush_pending_lists (deps, insn, true, false);
2687
      break;
2688
 
2689
    case PREFETCH:
2690
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
2691
        reg_pending_barrier = TRUE_BARRIER;
2692
      break;
2693
 
2694
    case UNSPEC_VOLATILE:
2695
      flush_pending_lists (deps, insn, true, true);
2696
      /* FALLTHRU */
2697
 
2698
    case ASM_OPERANDS:
2699
    case ASM_INPUT:
2700
      {
2701
        /* Traditional and volatile asm instructions must be considered to use
2702
           and clobber all hard registers, all pseudo-registers and all of
2703
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2704
 
2705
           Consider for instance a volatile asm that changes the fpu rounding
2706
           mode.  An insn should not be moved across this even if it only uses
2707
           pseudo-regs because it might give an incorrectly rounded result.  */
2708
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2709
          reg_pending_barrier = TRUE_BARRIER;
2710
 
2711
        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2712
           We cannot just fall through here since then we would be confused
2713
           by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2714
           a traditional asm, unlike its normal usage.  */
2715
 
2716
        if (code == ASM_OPERANDS)
2717
          {
2718
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2719
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2720
 
2721
            if (cslr_p && sched_deps_info->finish_rhs)
2722
              sched_deps_info->finish_rhs ();
2723
 
2724
            return;
2725
          }
2726
        break;
2727
      }
2728
 
2729
    case PRE_DEC:
2730
    case POST_DEC:
2731
    case PRE_INC:
2732
    case POST_INC:
2733
      /* These both read and modify the result.  We must handle them as writes
2734
         to get proper dependencies for following instructions.  We must handle
2735
         them as reads to get proper dependencies from this to previous
2736
         instructions.  Thus we need to pass them to both sched_analyze_1
2737
         and sched_analyze_2.  We must call sched_analyze_2 first in order
2738
         to get the proper antecedent for the read.  */
2739
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2740
      sched_analyze_1 (deps, x, insn);
2741
 
2742
      if (cslr_p && sched_deps_info->finish_rhs)
2743
        sched_deps_info->finish_rhs ();
2744
 
2745
      return;
2746
 
2747
    case POST_MODIFY:
2748
    case PRE_MODIFY:
2749
      /* op0 = op0 + op1 */
2750
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2751
      sched_analyze_2 (deps, XEXP (x, 1), insn);
2752
      sched_analyze_1 (deps, x, insn);
2753
 
2754
      if (cslr_p && sched_deps_info->finish_rhs)
2755
        sched_deps_info->finish_rhs ();
2756
 
2757
      return;
2758
 
2759
    default:
2760
      break;
2761
    }
2762
 
2763
  /* Other cases: walk the insn.  */
2764
  fmt = GET_RTX_FORMAT (code);
2765
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2766
    {
2767
      if (fmt[i] == 'e')
2768
        sched_analyze_2 (deps, XEXP (x, i), insn);
2769
      else if (fmt[i] == 'E')
2770
        for (j = 0; j < XVECLEN (x, i); j++)
2771
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2772
    }
2773
 
2774
  if (cslr_p && sched_deps_info->finish_rhs)
2775
    sched_deps_info->finish_rhs ();
2776
}
2777
 
2778
/* Analyze an INSN with pattern X to find all dependencies.  */
2779
static void
2780
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2781
{
2782
  RTX_CODE code = GET_CODE (x);
2783
  rtx link;
2784
  unsigned i;
2785
  reg_set_iterator rsi;
2786
 
2787
  if (! reload_completed)
2788
    {
2789
      HARD_REG_SET temp;
2790
 
2791
      extract_insn (insn);
2792
      preprocess_constraints ();
2793
      ira_implicitly_set_insn_hard_regs (&temp);
2794
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2795
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2796
    }
2797
 
2798
  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2799
                         && code == SET);
2800
 
2801
  if (may_trap_p (x))
2802
    /* Avoid moving trapping instructions across function calls that might
2803
       not always return.  */
2804
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
2805
                         1, REG_DEP_ANTI);
2806
 
2807
  /* We must avoid creating a situation in which two successors of the
2808
     current block have different unwind info after scheduling.  If at any
2809
     point the two paths re-join this leads to incorrect unwind info.  */
2810
  /* ??? There are certain situations involving a forced frame pointer in
2811
     which, with extra effort, we could fix up the unwind info at a later
2812
     CFG join.  However, it seems better to notice these cases earlier
2813
     during prologue generation and avoid marking the frame pointer setup
2814
     as frame-related at all.  */
2815
  if (RTX_FRAME_RELATED_P (insn))
2816
    {
2817
      /* Make sure prologue insn is scheduled before next jump.  */
2818
      deps->sched_before_next_jump
2819
        = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2820
 
2821
      /* Make sure epilogue insn is scheduled after preceding jumps.  */
2822
      add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI);
2823
    }
2824
 
2825
  if (code == COND_EXEC)
2826
    {
2827
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2828
 
2829
      /* ??? Should be recording conditions so we reduce the number of
2830
         false dependencies.  */
2831
      x = COND_EXEC_CODE (x);
2832
      code = GET_CODE (x);
2833
    }
2834
  if (code == SET || code == CLOBBER)
2835
    {
2836
      sched_analyze_1 (deps, x, insn);
2837
 
2838
      /* Bare clobber insns are used for letting life analysis, reg-stack
2839
         and others know that a value is dead.  Depend on the last call
2840
         instruction so that reg-stack won't get confused.  */
2841
      if (code == CLOBBER)
2842
        add_dependence_list (insn, deps->last_function_call, 1,
2843
                             REG_DEP_OUTPUT);
2844
    }
2845
  else if (code == PARALLEL)
2846
    {
2847
      for (i = XVECLEN (x, 0); i--;)
2848
        {
2849
          rtx sub = XVECEXP (x, 0, i);
2850
          code = GET_CODE (sub);
2851
 
2852
          if (code == COND_EXEC)
2853
            {
2854
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2855
              sub = COND_EXEC_CODE (sub);
2856
              code = GET_CODE (sub);
2857
            }
2858
          if (code == SET || code == CLOBBER)
2859
            sched_analyze_1 (deps, sub, insn);
2860
          else
2861
            sched_analyze_2 (deps, sub, insn);
2862
        }
2863
    }
2864
  else
2865
    sched_analyze_2 (deps, x, insn);
2866
 
2867
  /* Mark registers CLOBBERED or used by called function.  */
2868
  if (CALL_P (insn))
2869
    {
2870
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2871
        {
2872
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2873
            sched_analyze_1 (deps, XEXP (link, 0), insn);
2874
          else
2875
            sched_analyze_2 (deps, XEXP (link, 0), insn);
2876
        }
2877
      /* Don't schedule anything after a tail call; a tail call needs
2878
         to use at least all call-saved registers.  */
2879
      if (SIBLING_CALL_P (insn))
2880
        reg_pending_barrier = TRUE_BARRIER;
2881
      else if (find_reg_note (insn, REG_SETJMP, NULL))
2882
        reg_pending_barrier = MOVE_BARRIER;
2883
    }
2884
 
2885
  if (JUMP_P (insn))
2886
    {
2887
      rtx next;
2888
      next = next_nonnote_nondebug_insn (insn);
2889
      if (next && BARRIER_P (next))
2890
        reg_pending_barrier = MOVE_BARRIER;
2891
      else
2892
        {
2893
          rtx pending, pending_mem;
2894
 
2895
          if (sched_deps_info->compute_jump_reg_dependencies)
2896
            {
2897
              (*sched_deps_info->compute_jump_reg_dependencies)
2898
                (insn, reg_pending_control_uses);
2899
 
2900
              /* Make latency of jump equal to 0 by using anti-dependence.  */
2901
              EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2902
                {
2903
                  struct deps_reg *reg_last = &deps->reg_last[i];
2904
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2905
                  add_dependence_list (insn, reg_last->implicit_sets,
2906
                                       0, REG_DEP_ANTI);
2907
                  add_dependence_list (insn, reg_last->clobbers, 0,
2908
                                       REG_DEP_ANTI);
2909
                }
2910
            }
2911
 
2912
          /* All memory writes and volatile reads must happen before the
2913
             jump.  Non-volatile reads must happen before the jump iff
2914
             the result is needed by the register-use mask computed above.  */
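          /* Concretely (a sketch): the loops below give the jump a
             REG_DEP_OUTPUT dependence on every pending store and on every
             pending volatile load, and a REG_DEP_ANTI dependence on prior
             memory flushes and jumps, so none of them can drift below this
             jump during scheduling.  */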
2915
 
2916
          pending = deps->pending_write_insns;
2917
          pending_mem = deps->pending_write_mems;
2918
          while (pending)
2919
            {
2920
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2921
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2922
              pending = XEXP (pending, 1);
2923
              pending_mem = XEXP (pending_mem, 1);
2924
            }
2925
 
2926
          pending = deps->pending_read_insns;
2927
          pending_mem = deps->pending_read_mems;
2928
          while (pending)
2929
            {
2930
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2931
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2932
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2933
              pending = XEXP (pending, 1);
2934
              pending_mem = XEXP (pending_mem, 1);
2935
            }
2936
 
2937
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2938
                               REG_DEP_ANTI);
2939
          add_dependence_list (insn, deps->pending_jump_insns, 1,
2940
                               REG_DEP_ANTI);
2941
        }
2942
    }
2943
 
2944
  /* If this instruction can throw an exception, then moving it changes
2945
     where block boundaries fall.  This is mighty confusing elsewhere.
2946
     Therefore, prevent such an instruction from being moved.  Same for
2947
     non-jump instructions that define block boundaries.
2948
     ??? Unclear whether this is still necessary in EBB mode.  If not,
2949
     add_branch_dependences should be adjusted for RGN mode instead.  */
2950
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2951
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2952
    reg_pending_barrier = MOVE_BARRIER;
2953
 
2954
  if (sched_pressure_p)
2955
    {
2956
      setup_insn_reg_uses (deps, insn);
2957
      init_insn_reg_pressure_info (insn);
2958
    }
2959
 
2960
  /* Add register dependencies for insn.  */
2961
  if (DEBUG_INSN_P (insn))
2962
    {
2963
      rtx prev = deps->last_debug_insn;
2964
      rtx u;
2965
 
2966
      if (!deps->readonly)
2967
        deps->last_debug_insn = insn;
2968
 
2969
      if (prev)
2970
        add_dependence (insn, prev, REG_DEP_ANTI);
2971
 
2972
      add_dependence_list (insn, deps->last_function_call, 1,
2973
                           REG_DEP_ANTI);
2974
 
2975
      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2976
        if (!sel_sched_p ())
2977
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2978
 
2979
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2980
        {
2981
          struct deps_reg *reg_last = &deps->reg_last[i];
2982
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2983
          /* There's no point in making REG_DEP_CONTROL dependencies for
2984
             debug insns.  */
2985
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2986
 
2987
          if (!deps->readonly)
2988
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2989
        }
2990
      CLEAR_REG_SET (reg_pending_uses);
2991
 
2992
      /* Quite often, a debug insn will refer to stuff in the
2993
         previous instruction, but the reason we want this
2994
         dependency here is to make sure the scheduler doesn't
2995
         gratuitously move a debug insn ahead.  This could dirty
2996
         DF flags and cause additional analysis that wouldn't have
2997
         occurred in compilation without debug insns, and such
2998
         additional analysis can modify the generated code.  */
2999
      prev = PREV_INSN (insn);
3000
 
3001
      if (prev && NONDEBUG_INSN_P (prev))
3002
        add_dependence (insn, prev, REG_DEP_ANTI);
3003
    }
3004
  else
3005
    {
3006
      regset_head set_or_clobbered;
3007
 
3008
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3009
        {
3010
          struct deps_reg *reg_last = &deps->reg_last[i];
3011
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3012
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
3013
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3014
 
3015
          if (!deps->readonly)
3016
            {
3017
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3018
              reg_last->uses_length++;
3019
            }
3020
        }
3021
 
3022
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3023
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3024
          {
3025
            struct deps_reg *reg_last = &deps->reg_last[i];
3026
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3027
            add_dependence_list (insn, reg_last->implicit_sets, 0,
3028
                                 REG_DEP_ANTI);
3029
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3030
 
3031
            if (!deps->readonly)
3032
              {
3033
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3034
                reg_last->uses_length++;
3035
              }
3036
          }
3037
 
3038
      if (targetm.sched.exposed_pipeline)
3039
        {
3040
          INIT_REG_SET (&set_or_clobbered);
3041
          bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3042
                      reg_pending_sets);
3043
          EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3044
            {
3045
              struct deps_reg *reg_last = &deps->reg_last[i];
3046
              rtx list;
3047
              for (list = reg_last->uses; list; list = XEXP (list, 1))
3048
                {
3049
                  rtx other = XEXP (list, 0);
3050
                  if (INSN_CACHED_COND (other) != const_true_rtx
3051
                      && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3052
                    INSN_CACHED_COND (other) = const_true_rtx;
3053
                }
3054
            }
3055
        }
3056
 
3057
      /* If the current insn is conditional, we can't free any
3058
         of the lists.  */
3059
      if (sched_has_condition_p (insn))
3060
        {
3061
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3062
            {
3063
              struct deps_reg *reg_last = &deps->reg_last[i];
3064
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3065
              add_dependence_list (insn, reg_last->implicit_sets, 0,
3066
                                   REG_DEP_ANTI);
3067
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3068
              add_dependence_list (insn, reg_last->control_uses, 0,
3069
                                   REG_DEP_CONTROL);
3070
 
3071
              if (!deps->readonly)
3072
                {
3073
                  reg_last->clobbers
3074
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
3075
                  reg_last->clobbers_length++;
3076
                }
3077
            }
3078
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3079
            {
3080
              struct deps_reg *reg_last = &deps->reg_last[i];
3081
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3082
              add_dependence_list (insn, reg_last->implicit_sets, 0,
3083
                                   REG_DEP_ANTI);
3084
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
3085
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3086
              add_dependence_list (insn, reg_last->control_uses, 0,
3087
                                   REG_DEP_CONTROL);
3088
 
3089
              if (!deps->readonly)
3090
                reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3091
            }
3092
        }
3093
      else
3094
        {
3095
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3096
            {
3097
              struct deps_reg *reg_last = &deps->reg_last[i];
3098
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3099
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3100
                {
3101
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3102
                                                REG_DEP_OUTPUT);
3103
                  add_dependence_list_and_free (deps, insn,
3104
                                                &reg_last->implicit_sets, 0,
3105
                                                REG_DEP_ANTI);
3106
                  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3107
                                                REG_DEP_ANTI);
3108
                  add_dependence_list_and_free (deps, insn,
3109
                                                &reg_last->control_uses, 0,
3110
                                                REG_DEP_ANTI);
3111
                  add_dependence_list_and_free
3112
                    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
3113
 
3114
                  if (!deps->readonly)
3115
                    {
3116
                      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3117
                      reg_last->clobbers_length = 0;
3118
                      reg_last->uses_length = 0;
3119
                    }
3120
                }
3121
              else
3122
                {
3123
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3124
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
3125
                                       REG_DEP_ANTI);
3126
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3127
                  add_dependence_list (insn, reg_last->control_uses, 0,
3128
                                       REG_DEP_CONTROL);
3129
                }
3130
 
3131
              if (!deps->readonly)
3132
                {
3133
                  reg_last->clobbers_length++;
3134
                  reg_last->clobbers
3135
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
3136
                }
3137
            }
3138
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3139
            {
3140
              struct deps_reg *reg_last = &deps->reg_last[i];
3141
 
3142
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3143
                                            REG_DEP_OUTPUT);
3144
              add_dependence_list_and_free (deps, insn,
3145
                                            &reg_last->implicit_sets,
3146
                                            0, REG_DEP_ANTI);
3147
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3148
                                            REG_DEP_OUTPUT);
3149
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3150
                                            REG_DEP_ANTI);
3151
              add_dependence_list (insn, reg_last->control_uses, 0,
3152
                                   REG_DEP_CONTROL);
3153
 
3154
              if (!deps->readonly)
3155
                {
3156
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3157
                  reg_last->uses_length = 0;
3158
                  reg_last->clobbers_length = 0;
3159
                }
3160
            }
3161
        }
3162
      if (!deps->readonly)
3163
        {
3164
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3165
            {
3166
              struct deps_reg *reg_last = &deps->reg_last[i];
3167
              reg_last->control_uses
3168
                = alloc_INSN_LIST (insn, reg_last->control_uses);
3169
            }
3170
        }
3171
    }
3172
 
3173
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3174
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3175
      {
3176
        struct deps_reg *reg_last = &deps->reg_last[i];
3177
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
3178
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
3179
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3180
        add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI);
3181
 
3182
        if (!deps->readonly)
3183
          reg_last->implicit_sets
3184
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3185
      }
3186
 
3187
  if (!deps->readonly)
3188
    {
3189
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3190
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3191
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3192
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3193
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3194
            || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3195
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3196
 
3197
      /* Set up the pending barrier found.  */
3198
      deps->last_reg_pending_barrier = reg_pending_barrier;
3199
    }
3200
 
3201
  CLEAR_REG_SET (reg_pending_uses);
3202
  CLEAR_REG_SET (reg_pending_clobbers);
3203
  CLEAR_REG_SET (reg_pending_sets);
3204
  CLEAR_REG_SET (reg_pending_control_uses);
3205
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3206
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3207
 
3208
  /* Add dependencies if a scheduling barrier was found.  */
3209
  if (reg_pending_barrier)
3210
    {
3211
      /* In the case of a barrier, most of the added dependencies are not
3212
         real, so we use anti-dependence here.  */
3213
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->sets, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->control_uses, 0,
                                            REG_DEP_CONTROL);
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets, 0,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);

              if (!deps->readonly)
                {
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }

      if (!deps->readonly)
        for (i = 0; i < (unsigned)deps->max_reg; i++)
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          }

      /* Flush pending lists on jumps, but not on speculative checks.  */
      if (JUMP_P (insn) && !(sel_sched_p ()
                             && sel_insn_is_speculation_check (insn)))
        flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

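  /* Editor's illustration (not in the original sources): a typical member
     of such a post-call group is a plain copy between a hard register and
     a pseudo, e.g. on a target whose return value lives in hard register 0:

         (set (reg:SI 130) (reg:SI 0))

     The code below keeps moves like this glued to the call by setting
     SCHED_GROUP_P and CANT_MOVE, while anything more complicated simply
     closes the group.  */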
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        {
          if (DEBUG_INSN_P (insn))
            /* We don't want to mark debug insns as part of the same
               sched group.  We know they really aren't, but if we use
               debug insns to tell that a call group is over, we'll
               get different code if debug insns are not there and
               instructions that follow seem like they should be part
               of the call group.

               Also, if we did, fixup_sched_groups() would move the
               deps of the debug insn to the call insn, modifying
               non-debug post-dependency counts of the debug insn
               dependencies and otherwise messing with the scheduling
               order.

               Instead, let such debug insns be scheduled freely, but
               keep the call group open in case there are insns that
               should be part of it afterwards.  Since we grant debug
               insns higher priority than even sched group insns, it
               will all turn out all right.  */
            goto debug_dont_end_call_group;
          else
            goto end_call_group;
        }

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          if (!deps->readonly
              && deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          if (!sel_sched_p () || sched_emulate_haifa_p)
            {
              SCHED_GROUP_P (insn) = 1;
              CANT_MOVE (insn) = 1;
            }
        }
      else
        {
        end_call_group:
          if (!deps->readonly)
            deps->in_post_call_group_p = not_post_call;
        }
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
        sel_mark_hard_insn (insn);
      else
        {
          sd_iterator_def sd_it;
          dep_t dep;

          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
               sd_iterator_cond (&sd_it, &dep);)
            change_spec_dep_to_hard (sd_it);
        }
    }
}

/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
static bool
call_may_noreturn_p (rtx insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = PATTERN (insn);
  if (GET_CODE (call) == PARALLEL)
    call = XVECEXP (call, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);
  if (GET_CODE (call) == CALL
      && MEM_P (XEXP (call, 0))
      && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
        {
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
              == BUILT_IN_NORMAL)
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
              {
              case BUILT_IN_BCMP:
              case BUILT_IN_BCOPY:
              case BUILT_IN_BZERO:
              case BUILT_IN_INDEX:
              case BUILT_IN_MEMCHR:
              case BUILT_IN_MEMCMP:
              case BUILT_IN_MEMCPY:
              case BUILT_IN_MEMMOVE:
              case BUILT_IN_MEMPCPY:
              case BUILT_IN_MEMSET:
              case BUILT_IN_RINDEX:
              case BUILT_IN_STPCPY:
              case BUILT_IN_STPNCPY:
              case BUILT_IN_STRCAT:
              case BUILT_IN_STRCHR:
              case BUILT_IN_STRCMP:
              case BUILT_IN_STRCPY:
              case BUILT_IN_STRCSPN:
              case BUILT_IN_STRLEN:
              case BUILT_IN_STRNCAT:
              case BUILT_IN_STRNCMP:
              case BUILT_IN_STRNCPY:
              case BUILT_IN_STRPBRK:
              case BUILT_IN_STRRCHR:
              case BUILT_IN_STRSPN:
              case BUILT_IN_STRSTR:
                /* Assume certain string/memory builtins always return.  */
                return false;
              default:
                break;
              }
        }
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}

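/* Editor's note (not in the original sources): with call_may_noreturn_p
   above, a call to exit or longjmp, or any ordinary external call, is
   treated as possibly not returning, whereas a non-looping const/pure call
   or one of the recognized string/memory builtins (memcpy, strlen, ...) is
   assumed to return, so it does not become the insn recorded in
   last_function_call_may_noreturn below.  */
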
/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL_RTX;
      if (reload_completed
          && (current_sched_info->flags & DO_PREDICATION)
          && COMPARISON_P (t)
          && REG_P (XEXP (t, 0))
          && CONSTANT_P (XEXP (t, 1)))
        {
          unsigned int regno;
          int nregs;
          t = XEXP (t, 0);
          regno = REGNO (t);
          nregs = hard_regno_nregs[regno][GET_MODE (t)];
          t = NULL_RTX;
          while (nregs-- > 0)
            {
              struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
              t = concat_INSN_LIST (reg_last->sets, t);
              t = concat_INSN_LIST (reg_last->clobbers, t);
              t = concat_INSN_LIST (reg_last->implicit_sets, t);
            }
          INSN_COND_DEPS (insn) = t;
        }
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
            deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_jump, 1,
                                    REG_DEP_ANTI);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
          /* Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  */
            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but the stack pointer is certainly among
             them, so be conservative.  */
            else if (fixed_regs[i])
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
          /* The frame pointer is normally not used by the function
             itself, but by the debugger.  */
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
         convention that sched group insns have dependencies only on the
         previous instruction.

         Of course one can say: "Hey!  What about head of the sched group?"
         And I will answer: "Basic principles (one dep per insn) are always
         the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          if (call_may_noreturn_p (insn))
            {
              /* Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  */
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
              deps->last_function_call_may_noreturn
                = alloc_INSN_LIST (insn, NULL_RTX);
            }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  /* EH_REGION insn notes cannot appear until well after we complete
     scheduling.  */
  if (NOTE_P (insn))
    gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
                && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && SCHED_GROUP_P (insn) && !sel_sched_p ())
    fixup_sched_groups (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
{
  rtx insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {

      if (INSN_P (insn))
        {
          /* And initialize deps_lists.  */
          sd_init_insn (insn);
        }

      deps_analyze_insn (deps, insn);

      if (insn == tail)
        {
          if (sched_deps_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  gcc_unreachable ();
}

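/* Editor's sketch (assumption, not in the original sources): a scheduling
   pass typically drives the routines above roughly as follows, using only
   functions defined in this file; HEAD and TAIL delimit the region:

       struct deps_desc tmp_deps;

       init_deps (&tmp_deps, false);
       sched_analyze (&tmp_deps, head, tail);
       ... schedule the region ...
       free_deps (&tmp_deps);
       sched_free_deps (head, tail, true);  */
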
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx head, rtx tail, bool resolved_p)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  */
        if (resolved_p)
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
        else
          clear_deps_list (INSN_FORW_DEPS (insn));
      }
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear resolved back deps together with its dep_nodes.  */
        delete_dep_nodes_in_back_deps (insn, resolved_p);

        sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}

/* Initialize only the reg_last field of DEPS, which was not allocated
   before because DEPS was initialized lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


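/* Editor's note (not in the original sources): the lazy variant above exists
   for contexts that may never look at registers at all; such a caller passes
   lazy_reg_last == true to init_deps and calls init_deps_reg_last only once
   the reg_last array is actually needed.  */
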
/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
        free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
        free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}

/* Remove INSN from dependence contexts DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
        remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
        remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
          && !reg_last->clobbers)
        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
                                   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1
                 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
    VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
                           3 * sched_max_luid / 2);
}

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
                                   /* Allocate lists for one block at a time.  */
                                   insns_in_block);
      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
                                   /* Allocate nodes for one block at a time.
                                      We assume that average insn has
                                      5 producers.  */
                                   5 * insns_in_block);
    }
}


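/* Editor's note (not in the original sources): the "100 * 5" test above simply
   means that the bitmap dependency caches are only enabled when a basic block
   averages more than 500 insns; for smaller blocks the memory cost is not
   considered worthwhile (see the remark about true_dependency_cache).  */
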
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
                                             luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
          bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
        }
      cache_size = luid;
    }
}

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  free_alloc_pool_if_empty (&dn_pool);
  free_alloc_pool_if_empty (&dl_pool);
  gcc_assert (dn_pool == NULL && dl_pool == NULL);

  VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
  cache_size = 0;

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
          bitmap_clear (&control_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }

    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
          && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
           || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}

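/* Editor's illustration (not in the original sources): estimate_dep_weak
   returns MIN_DEP_WEAK (never speculate) when the two MEMs share the same
   address register, the midpoint between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK
   when one address is a REG and the other is not, and UNCERTAIN_DEP_WEAK
   otherwise.  */
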
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}

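/* Editor's illustration (not in the original sources): each speculative
   type's weakness occupies its own bit-field of the ds_t, so the helpers
   above round-trip, e.g.

       ds_t ds = set_dep_weak (0, BEGIN_DATA, w);
       gcc_assert (get_dep_weak_1 (ds, BEGIN_DATA) == w);

   for any w in [MIN_DEP_WEAK, MAX_DEP_WEAK].  */
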
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merging probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}

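/* Editor's illustration (not in the original sources): with a single
   speculative type in DS, ds_weak returns that type's weakness unchanged;
   with two types of weaknesses w1 and w2 it returns roughly
   w1 * w2 / MAX_DEP_WEAK, i.e. the combined success probability on the same
   fixed-point scale.  */
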
/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

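/* Editor's illustration (not in the original sources): for a data-speculative
   true dependence, debug_ds prints something along the lines of

       {BEGIN_DATA: 42; DEP_TRUE; }

   where the number is the BEGIN_DATA weakness stored in the status.  */
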
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
                && (ds & DEP_CONTROL)
                && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */